Mirror of git://nv-tegra.nvidia.com/tegra/kernel-src/nv-kernel-display-driver.git
Synced 2025-12-24 10:41:52 +03:00

Compare commits: l4t/l4t-r3 ... l4t/l4t-r3 (3 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 5823798c3c | |
| | e921c98e92 | |
| | a66f0feff7 | |
CODE_OF_CONDUCT.md (new file, 141 lines)
@@ -0,0 +1,141 @@
# Contributor Covenant Code of Conduct

## Our Pledge

We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, caste, color, religion, or sexual
identity and orientation.

We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.

## Our Standards

Examples of behavior that contributes to a positive environment for our
community include:

* Using welcoming and inclusive language
* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
  and learning from the experience
* Focusing on what is best not just for us as individuals, but for the overall
  community

Examples of unacceptable behavior include:

* The use of sexualized language or imagery and unwelcome sexual attention or
  advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic
  address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
  professional setting

## Enforcement Responsibilities

Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.

Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, or to ban temporarily or permanently any
contributor for other behaviors that they deem inappropriate, threatening,
offensive, or harmful.

## Scope

This Code of Conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community. Examples of
representing our community include using an official e-mail address, posting
via an official social media account, or acting as an appointed representative
at an online or offline event. Representation of a project may be further
defined and clarified by project maintainers.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders and moderators responsible for enforcement at
GitHub_Conduct@nvidia.com.
All complaints will be reviewed and investigated and will result in a response
that is deemed necessary and appropriate to the circumstances. Leaders and
moderators are obligated to maintain confidentiality with regard to the
reporter of an incident. Further details of specific enforcement policies may
be posted separately.

Moderators who do not follow or enforce the Code of Conduct in good faith may
face temporary or permanent repercussions as determined by other members of the
community’s leadership.

## Enforcement Guidelines

Community leaders and moderators will follow these Community Impact Guidelines
in determining the consequences for any action they deem in violation of this
Code of Conduct:

### 1. Correction

**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.

**Consequence**: A private, written warning from community moderators, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.

### 2. Warning

**Community Impact**: A violation through a single incident or series of
actions.

**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or permanent
ban.

### 3. Temporary Ban

**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.

**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.

### 4. Permanent Ban

**Community Impact**: Demonstrating an egregious single violation, or a pattern
of violation of community standards, including sustained inappropriate
behavior, harassment of an individual, or aggression toward or disparagement of
classes of individuals.

**Consequence**: A permanent ban from any sort of public interaction within the
community.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.1, available at
[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1].

Community Impact Guidelines were inspired by
[Mozilla's code of conduct enforcement ladder][Mozilla CoC].

For answers to common questions about this code of conduct, see the FAQ at
[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at
[https://www.contributor-covenant.org/translations][translations].

[homepage]: https://www.contributor-covenant.org
[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html
[Mozilla CoC]: https://github.com/mozilla/diversity
[FAQ]: https://www.contributor-covenant.org/faq
[translations]: https://www.contributor-covenant.org/translations
CONTRIBUTING.md (new file, 21 lines)
@@ -0,0 +1,21 @@
Thank you for all the enthusiasm around open-gpu-kernel-modules.

## Non-functional (cosmetic) changes

While we appreciate your enthusiasm, we have decided not to accept
non-functional changes such as non-code typo fixes, comment and language
adjustments, whitespace changes, and similar.

Changes going into this codebase incur significant overhead. As such, we want
to focus our resources on executable code improvements for now.

If you have questions, or are unsure about the nature of your desired change,
please ask us on the
[Discussion boards](https://github.com/NVIDIA/open-gpu-kernel-modules/discussions)!

## Code style

We currently do not publish a code style guide, as we have many different
components coming together. Please read the existing code in the repository,
especially the code surrounding your proposed change, to get a feel for what
you should aim for.

Don't worry too much about it! We are happy to guide you through any necessary
style changes through code review of your PR.
Makefile
@@ -6,9 +6,9 @@
# To install the built kernel modules: run (as root) `make modules_install`
###########################################################################

include utils.mk

all: modules
###########################################################################
# variables
###########################################################################

nv_kernel_o = src/nvidia/$(OUTPUTDIR)/nv-kernel.o
nv_kernel_o_binary = kernel-open/nvidia/nv-kernel.o_binary
@@ -16,13 +16,20 @@ nv_kernel_o_binary = kernel-open/nvidia/nv-kernel.o_binary
nv_modeset_kernel_o = src/nvidia-modeset/$(OUTPUTDIR)/nv-modeset-kernel.o
nv_modeset_kernel_o_binary = kernel-open/nvidia-modeset/nv-modeset-kernel.o_binary

.PHONY: $(nv_kernel_o) $(nv_modeset_kernel_o) modules modules_install
###########################################################################
# rules
###########################################################################

include utils.mk

.PHONY: all
all: modules

###########################################################################
# nv-kernel.o is the OS agnostic portion of nvidia.ko
###########################################################################

.PHONY: $(nv_kernel_o)
$(nv_kernel_o):
	$(MAKE) -C src/nvidia

@@ -34,6 +41,7 @@ $(nv_kernel_o_binary): $(nv_kernel_o)
# nv-modeset-kernel.o is the OS agnostic portion of nvidia-modeset.ko
###########################################################################

.PHONY: $(nv_modeset_kernel_o)
$(nv_modeset_kernel_o):
	$(MAKE) -C src/nvidia-modeset

@@ -46,31 +54,33 @@ $(nv_modeset_kernel_o_binary): $(nv_modeset_kernel_o)
# the kernel modules with kbuild.
###########################################################################

.PHONY: modules
modules: $(nv_kernel_o_binary) $(nv_modeset_kernel_o_binary)
	$(MAKE) -C kernel-open modules


###########################################################################
# Install the built kernel modules using kbuild.
###########################################################################

.PHONY: modules_install
modules_install:
	$(MAKE) -C kernel-open modules_install


###########################################################################
# clean
###########################################################################

.PHONY: clean nvidia.clean nvidia-modeset.clean kernel-open.clean

.PHONY: clean
clean: nvidia.clean nvidia-modeset.clean kernel-open.clean

.PHONY: nvidia.clean
nvidia.clean:
	$(MAKE) -C src/nvidia clean

.PHONY: nvidia-modeset.clean
nvidia-modeset.clean:
	$(MAKE) -C src/nvidia-modeset clean

.PHONY: kernel-open.clean
kernel-open.clean:
	$(MAKE) -C kernel-open clean
README.md (deleted file, 160 lines)
@@ -1,160 +0,0 @@
# NVIDIA Linux Open GPU Kernel Module Source

This is the source release of the NVIDIA Linux open GPU kernel modules,
version 35.2.1.


## How to Build

To build:

    make modules -j`nproc`

To install, first uninstall any existing NVIDIA kernel modules. Then,
as root:

    make modules_install -j`nproc`

Note that the kernel modules built here must be used with gsp.bin
firmware and user-space NVIDIA GPU driver components from a corresponding
35.2.1 driver release. This can be achieved by installing
the NVIDIA GPU driver from the .run file using the `--no-kernel-modules`
option. E.g.,

    sh ./NVIDIA-Linux-[...].run --no-kernel-modules


## Supported Target CPU Architectures

Currently, the kernel modules can be built for x86_64 or aarch64.
If cross-compiling, set these variables on the make command line:

    TARGET_ARCH=aarch64|x86_64
    CC
    LD
    AR
    CXX
    OBJCOPY

E.g.,

    # compile on x86_64 for aarch64
    make modules -j`nproc` \
        TARGET_ARCH=aarch64 \
        CC=aarch64-linux-gnu-gcc \
        LD=aarch64-linux-gnu-ld \
        AR=aarch64-linux-gnu-ar \
        CXX=aarch64-linux-gnu-g++ \
        OBJCOPY=aarch64-linux-gnu-objcopy


## Other Build Knobs

NV_VERBOSE - Set this to "1" to print each complete command executed;
    otherwise, a succinct "CC" line is printed.

DEBUG - Set this to "1" to build the kernel modules as debug. By default, the
    build compiles without debugging information. This also enables
    various debug log messages in the kernel modules.

These variables can be set on the make command line. E.g.,

    make modules -j`nproc` NV_VERBOSE=1


## Supported Toolchains

Any reasonably modern version of gcc or clang can be used to build the
kernel modules. Note that the kernel interface layers of the kernel
modules must be built with the toolchain that was used to build the
kernel.


## Supported Linux Kernel Versions

The NVIDIA open kernel modules support the same range of Linux kernel
versions that are supported with the proprietary NVIDIA kernel modules.
This is currently Linux kernel 3.10 or newer.


## How to Contribute

Contributions can be made by creating a pull request on
https://github.com/NVIDIA/open-gpu-kernel-modules.
We'll respond via GitHub.

Note that when submitting a pull request, you will be prompted to accept
a Contributor License Agreement.

This code base is shared with NVIDIA's proprietary drivers, and various
processing is performed on the shared code to produce the source code that is
published here. This has several implications for the foreseeable future:

* The GitHub repo will function mostly as a snapshot of each driver
  release.

* We do not expect to be able to provide revision history for individual
  changes that were made to NVIDIA's shared code base. There will likely
  only be one git commit per driver release.

* We may not be able to reflect individual contributions as separate
  git commits in the GitHub repo.

* Because the code undergoes various processing prior to publishing here,
  contributions made here require manual merging to be applied to the shared
  code base. Therefore, large refactoring changes made here may be difficult to
  merge and accept back into the shared code base. If you have large
  refactoring to suggest, please contact us in advance, so we can coordinate.


## How to Report Issues

Any of the existing bug reporting venues can be used to communicate
problems to NVIDIA, such as our forum:

https://forums.developer.nvidia.com/c/gpu-graphics/linux/148

or linux-bugs@nvidia.com.

Please see the 'NVIDIA Contact Info and Additional Resources' section
of the NVIDIA GPU Driver README for details.

Please see the separate [SECURITY.md](SECURITY.md) document if you
believe you have discovered a security vulnerability in this software.


## Kernel Interface and OS-Agnostic Components of Kernel Modules

Most of NVIDIA's kernel modules are split into two components:

* An "OS-agnostic" component: this is the component of each kernel module
  that is independent of operating system.

* A "kernel interface layer": this is the component of each kernel module
  that is specific to the Linux kernel version and configuration.

When packaged in the NVIDIA .run installation package, the OS-agnostic
component is provided as a binary: it is large and time-consuming to
compile, so pre-built versions are provided so that the user does
not have to compile it during every driver installation. For the
nvidia.ko kernel module, this component is named "nv-kernel.o_binary".
For the nvidia-modeset.ko kernel module, this component is named
"nv-modeset-kernel.o_binary". Neither nvidia-drm.ko nor nvidia-uvm.ko
have OS-agnostic components.

The kernel interface layer component for each kernel module must be built
for the target kernel.


## Directory Structure Layout

- `kernel-open/`                The kernel interface layer
- `kernel-open/nvidia/`         The kernel interface layer for nvidia.ko
- `kernel-open/nvidia-drm/`     The kernel interface layer for nvidia-drm.ko
- `kernel-open/nvidia-modeset/` The kernel interface layer for nvidia-modeset.ko
- `kernel-open/nvidia-uvm/`     The kernel interface layer for nvidia-uvm.ko

- `src/`                The OS-agnostic code
- `src/nvidia/`         The OS-agnostic code for nvidia.ko
- `src/nvidia-modeset/` The OS-agnostic code for nvidia-modeset.ko
- `src/common/`         Utility code used by one or more of nvidia.ko and nvidia-modeset.ko
nv-mm.h (deleted file, 264 lines)
@@ -1,264 +0,0 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2016-2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#ifndef __NV_MM_H__
#define __NV_MM_H__

#include "conftest.h"

#if !defined(NV_VM_FAULT_T_IS_PRESENT)
typedef int vm_fault_t;
#endif

/* pin_user_pages
 * Presence of pin_user_pages() also implies the presence of unpin_user_page().
 * Both were added in v5.6-rc1.
 *
 * pin_user_pages() was added by commit eddb1c228f7951d399240
 * ("mm/gup: introduce pin_user_pages*() and FOLL_PIN") in v5.6-rc1 (2020-01-30)
 */

#include <linux/mm.h>
#include <linux/sched.h>
#if defined(NV_PIN_USER_PAGES_PRESENT)
#define NV_PIN_USER_PAGES pin_user_pages
#define NV_UNPIN_USER_PAGE unpin_user_page
#else
#define NV_PIN_USER_PAGES NV_GET_USER_PAGES
#define NV_UNPIN_USER_PAGE put_page
#endif // NV_PIN_USER_PAGES_PRESENT

/* get_user_pages
 *
 * The 8-argument version of get_user_pages was deprecated by commit
 * cde70140fed8429acf7a14e2e2cbd3e329036653 (2016 Feb 12) for the non-remote
 * case (calling get_user_pages with current and current->mm).
 *
 * get_user_pages then moved completely to the 6-argument version in commit
 * c12d2da56d0e07d230968ee2305aaa86b93a6832 (2016 Apr 4).
 *
 * The write and force parameters were replaced with gup_flags by commit
 * 768ae309a96103ed02eb1e111e838c87854d8b51 (2016 Oct 12).
 *
 * A 7-argument version of get_user_pages was introduced into linux-4.4.y by
 * commit 8e50b8b07f462ab4b91bc1491b1c91bd75e4ad40, which cherry-picked the
 * replacement of the write and force parameters with gup_flags.
 */

#if defined(NV_GET_USER_PAGES_HAS_ARGS_FLAGS)
#define NV_GET_USER_PAGES get_user_pages
#elif defined(NV_GET_USER_PAGES_HAS_ARGS_TSK_FLAGS)
#define NV_GET_USER_PAGES(start, nr_pages, flags, pages, vmas) \
    get_user_pages(current, current->mm, start, nr_pages, flags, pages, vmas)
#else
static inline long NV_GET_USER_PAGES(unsigned long start,
                                     unsigned long nr_pages,
                                     unsigned int flags,
                                     struct page **pages,
                                     struct vm_area_struct **vmas)
{
    int write = flags & FOLL_WRITE;
    int force = flags & FOLL_FORCE;

#if defined(NV_GET_USER_PAGES_HAS_ARGS_WRITE_FORCE)
    return get_user_pages(start, nr_pages, write, force, pages, vmas);
#else
    // NV_GET_USER_PAGES_HAS_ARGS_TSK_WRITE_FORCE
    return get_user_pages(current, current->mm, start, nr_pages, write,
                          force, pages, vmas);
#endif // NV_GET_USER_PAGES_HAS_ARGS_WRITE_FORCE
}
#endif // NV_GET_USER_PAGES_HAS_ARGS_FLAGS

/* pin_user_pages_remote
 *
 * pin_user_pages_remote() was added by commit eddb1c228f7951d399240
 * ("mm/gup: introduce pin_user_pages*() and FOLL_PIN") in v5.6 (2020-01-30)
 *
 * pin_user_pages_remote() removed the 'tsk' parameter in commit
 * 64019a2e467a ("mm/gup: remove task_struct pointer for all gup code")
 * in v5.9-rc1 (2020-08-11).
 */

#if defined(NV_PIN_USER_PAGES_REMOTE_PRESENT)
#if defined(NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_TSK)
#define NV_PIN_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, vmas, locked) \
    pin_user_pages_remote(NULL, mm, start, nr_pages, flags, pages, vmas, locked)
#else
#define NV_PIN_USER_PAGES_REMOTE pin_user_pages_remote
#endif // NV_PIN_USER_PAGES_REMOTE_HAS_ARGS_TSK
#else
#define NV_PIN_USER_PAGES_REMOTE NV_GET_USER_PAGES_REMOTE
#endif // NV_PIN_USER_PAGES_REMOTE_PRESENT

/*
 * get_user_pages_remote() was added by commit 1e9877902dc7
 * ("mm/gup: Introduce get_user_pages_remote()") in v4.6 (2016-02-12).
 *
 * Note that get_user_pages_remote() requires the caller to hold a reference on
 * the task_struct (if non-NULL and if this API has the tsk argument) and the
 * mm_struct. This will always be true when using current and current->mm. If
 * the kernel passes the driver a vma via driver callback, the kernel holds a
 * reference on vma->vm_mm over that callback.
 *
 * The get_user_pages_remote() write/force parameters were replaced
 * with gup_flags by commit 9beae1ea8930 ("mm: replace get_user_pages_remote()
 * write/force parameters with gup_flags") in v4.9 (2016-10-13).
 *
 * get_user_pages_remote() added a 'locked' parameter in commit 5b56d49fc31d
 * ("mm: add locked parameter to get_user_pages_remote()") in
 * v4.10 (2016-12-14).
 *
 * get_user_pages_remote() removed the 'tsk' parameter in
 * commit 64019a2e467a ("mm/gup: remove task_struct pointer for
 * all gup code") in v5.9-rc1 (2020-08-11).
 */

#if defined(NV_GET_USER_PAGES_REMOTE_PRESENT)
#if defined(NV_GET_USER_PAGES_REMOTE_HAS_ARGS_FLAGS_LOCKED)
#define NV_GET_USER_PAGES_REMOTE get_user_pages_remote

#elif defined(NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_FLAGS_LOCKED)
#define NV_GET_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, vmas, locked) \
    get_user_pages_remote(NULL, mm, start, nr_pages, flags, pages, vmas, locked)

#elif defined(NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_FLAGS)
#define NV_GET_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, vmas, locked) \
    get_user_pages_remote(NULL, mm, start, nr_pages, flags, pages, vmas)

#else
// NV_GET_USER_PAGES_REMOTE_HAS_ARGS_TSK_WRITE_FORCE
static inline long NV_GET_USER_PAGES_REMOTE(struct mm_struct *mm,
                                            unsigned long start,
                                            unsigned long nr_pages,
                                            unsigned int flags,
                                            struct page **pages,
                                            struct vm_area_struct **vmas,
                                            int *locked)
{
    int write = flags & FOLL_WRITE;
    int force = flags & FOLL_FORCE;

    return get_user_pages_remote(NULL, mm, start, nr_pages, write, force,
                                 pages, vmas);
}
#endif // NV_GET_USER_PAGES_REMOTE_HAS_ARGS_FLAGS_LOCKED
#else
#if defined(NV_GET_USER_PAGES_HAS_ARGS_TSK_WRITE_FORCE)
static inline long NV_GET_USER_PAGES_REMOTE(struct mm_struct *mm,
                                            unsigned long start,
                                            unsigned long nr_pages,
                                            unsigned int flags,
                                            struct page **pages,
                                            struct vm_area_struct **vmas,
                                            int *locked)
{
    int write = flags & FOLL_WRITE;
    int force = flags & FOLL_FORCE;

    return get_user_pages(NULL, mm, start, nr_pages, write, force, pages, vmas);
}

#else
#define NV_GET_USER_PAGES_REMOTE(mm, start, nr_pages, flags, pages, vmas, locked) \
    get_user_pages(NULL, mm, start, nr_pages, flags, pages, vmas)
#endif // NV_GET_USER_PAGES_HAS_ARGS_TSK_WRITE_FORCE
#endif // NV_GET_USER_PAGES_REMOTE_PRESENT

/*
 * The .virtual_address field was effectively renamed to .address, by these
 * two commits:
 *
 * struct vm_fault: .address was added by:
 *  2016-12-14  82b0f8c39a3869b6fd2a10e180a862248736ec6f
 *
 * struct vm_fault: .virtual_address was removed by:
 *  2016-12-14  1a29d85eb0f19b7d8271923d8917d7b4f5540b3e
 */
static inline unsigned long nv_page_fault_va(struct vm_fault *vmf)
{
#if defined(NV_VM_FAULT_HAS_ADDRESS)
    return vmf->address;
#else
    return (unsigned long)(vmf->virtual_address);
#endif
}

static inline void nv_mmap_read_lock(struct mm_struct *mm)
{
#if defined(NV_MM_HAS_MMAP_LOCK)
    mmap_read_lock(mm);
#else
    down_read(&mm->mmap_sem);
#endif
}

static inline void nv_mmap_read_unlock(struct mm_struct *mm)
{
#if defined(NV_MM_HAS_MMAP_LOCK)
    mmap_read_unlock(mm);
#else
    up_read(&mm->mmap_sem);
#endif
}

static inline void nv_mmap_write_lock(struct mm_struct *mm)
{
#if defined(NV_MM_HAS_MMAP_LOCK)
    mmap_write_lock(mm);
#else
    down_write(&mm->mmap_sem);
#endif
}

static inline void nv_mmap_write_unlock(struct mm_struct *mm)
{
#if defined(NV_MM_HAS_MMAP_LOCK)
    mmap_write_unlock(mm);
#else
    up_write(&mm->mmap_sem);
#endif
}

static inline int nv_mm_rwsem_is_locked(struct mm_struct *mm)
{
#if defined(NV_MM_HAS_MMAP_LOCK)
    return rwsem_is_locked(&mm->mmap_lock);
#else
    return rwsem_is_locked(&mm->mmap_sem);
#endif
}

static inline struct rw_semaphore *nv_mmap_get_lock(struct mm_struct *mm)
{
#if defined(NV_MM_HAS_MMAP_LOCK)
    return &mm->mmap_lock;
#else
    return &mm->mmap_sem;
#endif
}

#endif // __NV_MM_H__
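The wrappers above present one call signature across all the kernel variants the header probes for. As a minimal, hypothetical usage sketch (editor illustration, not part of the deleted header; example_pin_one_page is an invented name), pinning a single user page for write access looks like this:

/* Hypothetical illustration of the NV_PIN_USER_PAGES wrappers above;
 * example_pin_one_page() is an invented name, not part of nv-mm.h. */
static long example_pin_one_page(unsigned long user_addr, struct page **page)
{
    long rc;

    nv_mmap_read_lock(current->mm);               /* guard the gup call */
    rc = NV_PIN_USER_PAGES(user_addr & PAGE_MASK, /* page-aligned start */
                           1,                     /* one page */
                           FOLL_WRITE,            /* request write access */
                           page,
                           NULL);                 /* no vmas needed */
    nv_mmap_read_unlock(current->mm);

    if (rc == 1) {
        /* ... access the pinned page ... */
        NV_UNPIN_USER_PAGE(*page);  /* pairs with NV_PIN_USER_PAGES */
    }
    return rc;
}

On kernels without pin_user_pages(), the same call transparently falls back to NV_GET_USER_PAGES with put_page() as the release primitive.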
nv_register_module.h (deleted file, 55 lines)
@@ -1,55 +0,0 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2012-2013 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */


#ifndef _NV_REGISTER_MODULE_H_
#define _NV_REGISTER_MODULE_H_

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/poll.h>

#include "nvtypes.h"

typedef struct nvidia_module_s {
    struct module *owner;

    /* nvidia0, nvidia1, ... */
    const char *module_name;

    /* module instance */
    NvU32 instance;

    /* file operations */
    int (*open)(struct inode *, struct file *filp);
    int (*close)(struct inode *, struct file *filp);
    int (*mmap)(struct file *filp, struct vm_area_struct *vma);
    int (*ioctl)(struct inode *, struct file *file, unsigned int cmd, unsigned long arg);
    unsigned int (*poll)(struct file *file, poll_table *wait);

} nvidia_module_t;

int nvidia_register_module(nvidia_module_t *);
int nvidia_unregister_module(nvidia_module_t *);

#endif
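As a hedged illustration of how a client of this interface might register itself (all my_* callback names, example_init, and example_exit are invented for the sketch; they are not part of the deleted header):

/* Hypothetical registration sketch; the my_* callbacks and the
 * example_init/example_exit names are invented for illustration. */
static nvidia_module_t my_module = {
    .owner       = THIS_MODULE,
    .module_name = "nvidia0",   /* per the comment above: nvidia0, nvidia1, ... */
    .instance    = 0,
    .open        = my_open,
    .close       = my_close,
    .mmap        = my_mmap,
    .ioctl       = my_ioctl,
    .poll        = my_poll,
};

static int __init example_init(void)
{
    return nvidia_register_module(&my_module);  /* nonzero on failure */
}

static void __exit example_exit(void)
{
    nvidia_unregister_module(&my_module);
}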
os-interface.h (deleted file, 257 lines)
@@ -1,257 +0,0 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */


/*
 * OS interface definitions needed by os-interface.c
 */

#ifndef OS_INTERFACE_H
#define OS_INTERFACE_H

/******************* Operating System Interface Routines *******************\
*                                                                           *
*    Operating system wrapper functions used to abstract the OS.           *
*                                                                           *
\***************************************************************************/

#include <nvtypes.h>
#include <nvstatus.h>
#include "nv_stdarg.h"
#include <nv-kernel-interface-api.h>
#include <os/nv_memory_type.h>
#include <nv-caps.h>

typedef struct
{
    NvU32 os_major_version;
    NvU32 os_minor_version;
    NvU32 os_build_number;
    const char * os_build_version_str;
    const char * os_build_date_plus_str;
} os_version_info;

/* Each OS defines its own version of this opaque type */
struct os_work_queue;

/* Each OS defines its own version of this opaque type */
typedef struct os_wait_queue os_wait_queue;

/*
 * ---------------------------------------------------------------------------
 *
 * Function prototypes for OS interface.
 *
 * ---------------------------------------------------------------------------
 */

NvU64 NV_API_CALL os_get_num_phys_pages (void);
NV_STATUS NV_API_CALL os_alloc_mem (void **, NvU64);
void NV_API_CALL os_free_mem (void *);
NV_STATUS NV_API_CALL os_get_current_time (NvU32 *, NvU32 *);
NvU64 NV_API_CALL os_get_current_tick (void);
NvU64 NV_API_CALL os_get_current_tick_hr (void);
NvU64 NV_API_CALL os_get_tick_resolution (void);
NV_STATUS NV_API_CALL os_delay (NvU32);
NV_STATUS NV_API_CALL os_delay_us (NvU32);
NvU64 NV_API_CALL os_get_cpu_frequency (void);
NvU32 NV_API_CALL os_get_current_process (void);
void NV_API_CALL os_get_current_process_name (char *, NvU32);
NV_STATUS NV_API_CALL os_get_current_thread (NvU64 *);
char* NV_API_CALL os_string_copy (char *, const char *);
NvU32 NV_API_CALL os_string_length (const char *);
NvU32 NV_API_CALL os_strtoul (const char *, char **, NvU32);
NvS32 NV_API_CALL os_string_compare (const char *, const char *);
NvS32 NV_API_CALL os_snprintf (char *, NvU32, const char *, ...);
NvS32 NV_API_CALL os_vsnprintf (char *, NvU32, const char *, va_list);
void NV_API_CALL os_log_error (const char *, va_list);
void* NV_API_CALL os_mem_copy (void *, const void *, NvU32);
NV_STATUS NV_API_CALL os_memcpy_from_user (void *, const void *, NvU32);
NV_STATUS NV_API_CALL os_memcpy_to_user (void *, const void *, NvU32);
void* NV_API_CALL os_mem_set (void *, NvU8, NvU32);
NvS32 NV_API_CALL os_mem_cmp (const NvU8 *, const NvU8 *, NvU32);
void* NV_API_CALL os_pci_init_handle (NvU32, NvU8, NvU8, NvU8, NvU16 *, NvU16 *);
NV_STATUS NV_API_CALL os_pci_read_byte (void *, NvU32, NvU8 *);
NV_STATUS NV_API_CALL os_pci_read_word (void *, NvU32, NvU16 *);
NV_STATUS NV_API_CALL os_pci_read_dword (void *, NvU32, NvU32 *);
NV_STATUS NV_API_CALL os_pci_write_byte (void *, NvU32, NvU8);
NV_STATUS NV_API_CALL os_pci_write_word (void *, NvU32, NvU16);
NV_STATUS NV_API_CALL os_pci_write_dword (void *, NvU32, NvU32);
NvBool NV_API_CALL os_pci_remove_supported (void);
void NV_API_CALL os_pci_remove (void *);
void* NV_API_CALL os_map_kernel_space (NvU64, NvU64, NvU32);
void NV_API_CALL os_unmap_kernel_space (void *, NvU64);
void* NV_API_CALL os_map_user_space (NvU64, NvU64, NvU32, NvU32, void **);
void NV_API_CALL os_unmap_user_space (void *, NvU64, void *);
NV_STATUS NV_API_CALL os_flush_cpu_cache (void);
NV_STATUS NV_API_CALL os_flush_cpu_cache_all (void);
NV_STATUS NV_API_CALL os_flush_user_cache (void);
void NV_API_CALL os_flush_cpu_write_combine_buffer(void);
NvU8 NV_API_CALL os_io_read_byte (NvU32);
NvU16 NV_API_CALL os_io_read_word (NvU32);
NvU32 NV_API_CALL os_io_read_dword (NvU32);
void NV_API_CALL os_io_write_byte (NvU32, NvU8);
void NV_API_CALL os_io_write_word (NvU32, NvU16);
void NV_API_CALL os_io_write_dword (NvU32, NvU32);
NvBool NV_API_CALL os_is_administrator (void);
NvBool NV_API_CALL os_allow_priority_override (void);
void NV_API_CALL os_dbg_init (void);
void NV_API_CALL os_dbg_breakpoint (void);
void NV_API_CALL os_dbg_set_level (NvU32);
NvU32 NV_API_CALL os_get_cpu_count (void);
NvU32 NV_API_CALL os_get_cpu_number (void);
void NV_API_CALL os_disable_console_access (void);
void NV_API_CALL os_enable_console_access (void);
NV_STATUS NV_API_CALL os_registry_init (void);
NV_STATUS NV_API_CALL os_schedule (void);
NV_STATUS NV_API_CALL os_alloc_spinlock (void **);
void NV_API_CALL os_free_spinlock (void *);
NvU64 NV_API_CALL os_acquire_spinlock (void *);
void NV_API_CALL os_release_spinlock (void *, NvU64);
NV_STATUS NV_API_CALL os_queue_work_item (struct os_work_queue *, void *);
NV_STATUS NV_API_CALL os_flush_work_queue (struct os_work_queue *);
NV_STATUS NV_API_CALL os_alloc_mutex (void **);
void NV_API_CALL os_free_mutex (void *);
NV_STATUS NV_API_CALL os_acquire_mutex (void *);
NV_STATUS NV_API_CALL os_cond_acquire_mutex (void *);
void NV_API_CALL os_release_mutex (void *);
void* NV_API_CALL os_alloc_semaphore (NvU32);
void NV_API_CALL os_free_semaphore (void *);
NV_STATUS NV_API_CALL os_acquire_semaphore (void *);
NV_STATUS NV_API_CALL os_cond_acquire_semaphore (void *);
NV_STATUS NV_API_CALL os_release_semaphore (void *);
NvBool NV_API_CALL os_semaphore_may_sleep (void);
NV_STATUS NV_API_CALL os_get_version_info (os_version_info*);
NvBool NV_API_CALL os_is_isr (void);
NvBool NV_API_CALL os_pat_supported (void);
void NV_API_CALL os_dump_stack (void);
NvBool NV_API_CALL os_is_efi_enabled (void);
NvBool NV_API_CALL os_is_xen_dom0 (void);
NvBool NV_API_CALL os_is_vgx_hyper (void);
NV_STATUS NV_API_CALL os_inject_vgx_msi (NvU16, NvU64, NvU32);
NvBool NV_API_CALL os_is_grid_supported (void);
NvU32 NV_API_CALL os_get_grid_csp_support (void);
void NV_API_CALL os_get_screen_info (NvU64 *, NvU16 *, NvU16 *, NvU16 *, NvU16 *, NvU64, NvU64);
void NV_API_CALL os_bug_check (NvU32, const char *);
NV_STATUS NV_API_CALL os_lock_user_pages (void *, NvU64, void **, NvU32);
NV_STATUS NV_API_CALL os_lookup_user_io_memory (void *, NvU64, NvU64 **, void**);
NV_STATUS NV_API_CALL os_unlock_user_pages (NvU64, void *);
NV_STATUS NV_API_CALL os_match_mmap_offset (void *, NvU64, NvU64 *);
NV_STATUS NV_API_CALL os_get_euid (NvU32 *);
NV_STATUS NV_API_CALL os_get_smbios_header (NvU64 *pSmbsAddr);
NV_STATUS NV_API_CALL os_get_acpi_rsdp_from_uefi (NvU32 *);
void NV_API_CALL os_add_record_for_crashLog (void *, NvU32);
void NV_API_CALL os_delete_record_for_crashLog (void *);
NV_STATUS NV_API_CALL os_call_vgpu_vfio (void *, NvU32);
NV_STATUS NV_API_CALL os_numa_memblock_size (NvU64 *);
NV_STATUS NV_API_CALL os_alloc_pages_node (NvS32, NvU32, NvU32, NvU64 *);
NV_STATUS NV_API_CALL os_get_page (NvU64 address);
NV_STATUS NV_API_CALL os_put_page (NvU64 address);
NvU32 NV_API_CALL os_get_page_refcount (NvU64 address);
NvU32 NV_API_CALL os_count_tail_pages (NvU64 address);
void NV_API_CALL os_free_pages_phys (NvU64, NvU32);
NV_STATUS NV_API_CALL os_call_nv_vmbus (NvU32, void *);
NV_STATUS NV_API_CALL os_open_temporary_file (void **);
void NV_API_CALL os_close_file (void *);
NV_STATUS NV_API_CALL os_write_file (void *, NvU8 *, NvU64, NvU64);
NV_STATUS NV_API_CALL os_read_file (void *, NvU8 *, NvU64, NvU64);
NV_STATUS NV_API_CALL os_open_readonly_file (const char *, void **);
NV_STATUS NV_API_CALL os_open_and_read_file (const char *, NvU8 *, NvU64);
NvBool NV_API_CALL os_is_nvswitch_present (void);
void NV_API_CALL os_get_random_bytes (NvU8 *, NvU16);
NV_STATUS NV_API_CALL os_alloc_wait_queue (os_wait_queue **);
void NV_API_CALL os_free_wait_queue (os_wait_queue *);
void NV_API_CALL os_wait_uninterruptible (os_wait_queue *);
void NV_API_CALL os_wait_interruptible (os_wait_queue *);
void NV_API_CALL os_wake_up (os_wait_queue *);
nv_cap_t* NV_API_CALL os_nv_cap_init (const char *);
nv_cap_t* NV_API_CALL os_nv_cap_create_dir_entry (nv_cap_t *, const char *, int);
nv_cap_t* NV_API_CALL os_nv_cap_create_file_entry (nv_cap_t *, const char *, int);
void NV_API_CALL os_nv_cap_destroy_entry (nv_cap_t *);
int NV_API_CALL os_nv_cap_validate_and_dup_fd(const nv_cap_t *, int);
void NV_API_CALL os_nv_cap_close_fd (int);

NV_STATUS NV_API_CALL os_get_tegra_platform (NvU32 *);

extern NvU32 os_page_size;
extern NvU64 os_page_mask;
extern NvU8 os_page_shift;
extern NvU32 os_sev_status;
extern NvBool os_sev_enabled;
extern NvBool os_dma_buf_enabled;

/*
 * ---------------------------------------------------------------------------
 *
 * Debug macros.
 *
 * ---------------------------------------------------------------------------
 */

#define NV_DBG_INFO       0x0
#define NV_DBG_SETUP      0x1
#define NV_DBG_USERERRORS 0x2
#define NV_DBG_WARNINGS   0x3
#define NV_DBG_ERRORS     0x4

void NV_API_CALL out_string(const char *str);
int NV_API_CALL nv_printf(NvU32 debuglevel, const char *printf_format, ...);

#define NV_DEV_PRINTF(debuglevel, nv, format, ... ) \
    nv_printf(debuglevel, "NVRM: GPU " NV_PCI_DEV_FMT ": " format, NV_PCI_DEV_FMT_ARGS(nv), ## __VA_ARGS__)

#define NV_DEV_PRINTF_STATUS(debuglevel, nv, status, format, ... ) \
    nv_printf(debuglevel, "NVRM: GPU " NV_PCI_DEV_FMT ": " format " (0x%x)\n", NV_PCI_DEV_FMT_ARGS(nv), ## __VA_ARGS__, status)
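A brief, hypothetical usage sketch of the logging entry points above (editor illustration; nv_state_t is assumed to be the per-device state type that NV_PCI_DEV_FMT_ARGS() accepts, and example_log is an invented name):

/* Hypothetical logging sketch; nv_state_t and the arguments are assumed
 * to come from the driver's per-device context. */
static void example_log(nv_state_t *nv, NvU32 flags, NV_STATUS status)
{
    nv_printf(NV_DBG_ERRORS, "NVRM: example: initialization failed\n");
    NV_DEV_PRINTF(NV_DBG_WARNINGS, nv, "example warning, flags=0x%x\n", flags);
    NV_DEV_PRINTF_STATUS(NV_DBG_ERRORS, nv, status, "example failure");
}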

/*
 * Fields for the os_lock_user_pages flags parameter
 */
#define NV_LOCK_USER_PAGES_FLAGS_WRITE     0:0
#define NV_LOCK_USER_PAGES_FLAGS_WRITE_NO  0x00000000
#define NV_LOCK_USER_PAGES_FLAGS_WRITE_YES 0x00000001
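A hedged sketch of how these flag values pair with the os_lock_user_pages()/os_unlock_user_pages() prototypes declared above (example_lock_pages is an invented name used only for illustration):

/* Hypothetical sketch pairing the WRITE flag with the prototypes above. */
static NV_STATUS example_lock_pages(void *user_ptr, NvU64 page_count)
{
    void *page_array = NULL;
    NV_STATUS status;

    /* Request a writable lock on the user pages. */
    status = os_lock_user_pages(user_ptr, page_count, &page_array,
                                NV_LOCK_USER_PAGES_FLAGS_WRITE_YES);
    if (status != NV_OK)
        return status;

    /* ... DMA to/from the locked pages ... */

    return os_unlock_user_pages(page_count, page_array);
}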

// NV OS Tegra platform type defines
#define NV_OS_TEGRA_PLATFORM_SIM     0
#define NV_OS_TEGRA_PLATFORM_FPGA    1
#define NV_OS_TEGRA_PLATFORM_SILICON 2

#endif /* OS_INTERFACE_H */
nvidia-dma-fence-helper.h (deleted file, 121 lines)
@@ -1,121 +0,0 @@
/*
 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef __NVIDIA_DMA_FENCE_HELPER_H__
#define __NVIDIA_DMA_FENCE_HELPER_H__

#include "nvidia-drm-conftest.h"

#if defined(NV_DRM_FENCE_AVAILABLE)

/*
 * The fence headers were moved to dma-fence.h, and struct fence was renamed
 * to dma_fence, by commit f54d1867005c3323f5d8ad83eed823e84226c429
 * (2016-10-25).
 */

#if defined(NV_LINUX_FENCE_H_PRESENT)
#include <linux/fence.h>
#else
#include <linux/dma-fence.h>
#endif

#if defined(NV_LINUX_FENCE_H_PRESENT)
typedef struct fence nv_dma_fence_t;
typedef struct fence_ops nv_dma_fence_ops_t;
#else
typedef struct dma_fence nv_dma_fence_t;
typedef struct dma_fence_ops nv_dma_fence_ops_t;
#endif

#if defined(NV_LINUX_FENCE_H_PRESENT)
#define NV_DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT FENCE_FLAG_ENABLE_SIGNAL_BIT
#else
#define NV_DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT
#endif

static inline bool nv_dma_fence_is_signaled(nv_dma_fence_t *fence) {
#if defined(NV_LINUX_FENCE_H_PRESENT)
    return fence_is_signaled(fence);
#else
    return dma_fence_is_signaled(fence);
#endif
}

static inline nv_dma_fence_t *nv_dma_fence_get(nv_dma_fence_t *fence)
{
#if defined(NV_LINUX_FENCE_H_PRESENT)
    return fence_get(fence);
#else
    return dma_fence_get(fence);
#endif
}

static inline void nv_dma_fence_put(nv_dma_fence_t *fence) {
#if defined(NV_LINUX_FENCE_H_PRESENT)
    fence_put(fence);
#else
    dma_fence_put(fence);
#endif
}

static inline signed long
nv_dma_fence_default_wait(nv_dma_fence_t *fence,
                          bool intr, signed long timeout) {
#if defined(NV_LINUX_FENCE_H_PRESENT)
    return fence_default_wait(fence, intr, timeout);
#else
    return dma_fence_default_wait(fence, intr, timeout);
#endif
}

static inline int nv_dma_fence_signal(nv_dma_fence_t *fence) {
#if defined(NV_LINUX_FENCE_H_PRESENT)
    return fence_signal(fence);
#else
    return dma_fence_signal(fence);
#endif
}

static inline u64 nv_dma_fence_context_alloc(unsigned num) {
#if defined(NV_LINUX_FENCE_H_PRESENT)
    return fence_context_alloc(num);
#else
    return dma_fence_context_alloc(num);
#endif
}

static inline void
nv_dma_fence_init(nv_dma_fence_t *fence,
                  const nv_dma_fence_ops_t *ops,
                  spinlock_t *lock, u64 context, unsigned seqno) {
#if defined(NV_LINUX_FENCE_H_PRESENT)
    fence_init(fence, ops, lock, context, seqno);
#else
    dma_fence_init(fence, ops, lock, context, seqno);
#endif
}

#endif /* defined(NV_DRM_FENCE_AVAILABLE) */

#endif /* __NVIDIA_DMA_FENCE_HELPER_H__ */
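A minimal, hypothetical sketch of the wrapper lifecycle (editor illustration; example_fence_lifecycle is an invented name, and the ops table and lock are assumed to be supplied by the caller, as in the prime-fence code later in this diff):

/* Hypothetical lifecycle sketch for the version-neutral wrappers above. */
static void example_fence_lifecycle(nv_dma_fence_t *fence,
                                    const nv_dma_fence_ops_t *ops,
                                    spinlock_t *lock)
{
    u64 context = nv_dma_fence_context_alloc(1); /* one timeline */

    nv_dma_fence_init(fence, ops, lock, context, 1 /* seqno */);
    /* ... export the fence to a consumer ... */
    nv_dma_fence_signal(fence);  /* mark the work complete */
    nv_dma_fence_put(fence);     /* drop our reference */
}

The same source compiles against both pre-4.10 kernels (struct fence in linux/fence.h) and newer kernels (struct dma_fence in linux/dma-fence.h).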
nvidia-drm-conftest.h (deleted file, 64 lines)
@@ -1,64 +0,0 @@
/*
 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef __NVIDIA_DRM_CONFTEST_H__
#define __NVIDIA_DRM_CONFTEST_H__

#include "conftest.h"

/*
 * NOTE: This file is expected to be included at the top, before any of the
 * linux/drm headers.
 *
 * The goal is to redefine refcount_dec_and_test and refcount_inc before
 * including the drm header files, so that the drm macro/inline calls to
 * refcount_dec_and_test and refcount_inc get redirected to the alternate
 * implementations in this file.
 */

#if NV_IS_EXPORT_SYMBOL_GPL_refcount_inc

#include <linux/refcount.h>

#define refcount_inc(__ptr)         \
    do {                            \
        atomic_inc(&(__ptr)->refs); \
    } while(0)

#endif

#if NV_IS_EXPORT_SYMBOL_GPL_refcount_dec_and_test

#include <linux/refcount.h>

#define refcount_dec_and_test(__ptr) atomic_dec_and_test(&(__ptr)->refs)

#endif

#if defined(NV_DRM_DRIVER_HAS_GEM_PRIME_RES_OBJ) || \
    defined(NV_DRM_GEM_OBJECT_HAS_RESV)
#define NV_DRM_FENCE_AVAILABLE
#else
#undef NV_DRM_FENCE_AVAILABLE
#endif

#endif /* defined(__NVIDIA_DRM_CONFTEST_H__) */
(Two file diffs suppressed because they are too large.)
@@ -1,518 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "nvidia-drm-conftest.h"
|
||||
|
||||
#if defined(NV_DRM_AVAILABLE)
|
||||
|
||||
#if defined(NV_DRM_DRMP_H_PRESENT)
|
||||
#include <drm/drmP.h>
|
||||
#endif
|
||||
|
||||
#include "nvidia-drm-priv.h"
|
||||
#include "nvidia-drm-ioctl.h"
|
||||
#include "nvidia-drm-gem.h"
|
||||
#include "nvidia-drm-prime-fence.h"
|
||||
#include "nvidia-dma-resv-helper.h"
|
||||
|
||||
#if defined(NV_DRM_FENCE_AVAILABLE)
|
||||
|
||||
#include "nvidia-dma-fence-helper.h"
|
||||
|
||||
struct nv_drm_fence_context {
|
||||
struct nv_drm_device *nv_dev;
|
||||
|
||||
uint32_t context;
|
||||
|
||||
NvU64 fenceSemIndex; /* Index into semaphore surface */
|
||||
|
||||
/* Mapped semaphore surface */
|
||||
struct NvKmsKapiMemory *pSemSurface;
|
||||
NvU32 *pLinearAddress;
|
||||
|
||||
/* Protects nv_drm_fence_context::{pending, last_seqno} */
|
||||
spinlock_t lock;
|
||||
|
||||
/*
|
||||
* Software signaling structures. __nv_drm_fence_context_new()
|
||||
* allocates channel event and __nv_drm_fence_context_destroy() frees it.
|
||||
* There are no simultaneous read/write access to 'cb', therefore it does
|
||||
* not require spin-lock protection.
|
||||
*/
|
||||
struct NvKmsKapiChannelEvent *cb;
|
||||
|
||||
/* List of pending fences which are not yet signaled */
|
||||
struct list_head pending;
|
||||
|
||||
unsigned last_seqno;
|
||||
};
|
||||
|
||||
struct nv_drm_prime_fence {
|
||||
struct list_head list_entry;
|
||||
nv_dma_fence_t base;
|
||||
spinlock_t lock;
|
||||
};
|
||||
|
||||
static inline
|
||||
struct nv_drm_prime_fence *to_nv_drm_prime_fence(nv_dma_fence_t *fence)
|
||||
{
|
||||
return container_of(fence, struct nv_drm_prime_fence, base);
|
||||
}
|
||||
|
||||
static const char*
|
||||
nv_drm_gem_prime_fence_op_get_driver_name(nv_dma_fence_t *fence)
|
||||
{
|
||||
return "NVIDIA";
|
||||
}
|
||||
|
||||
static const char*
|
||||
nv_drm_gem_prime_fence_op_get_timeline_name(nv_dma_fence_t *fence)
|
||||
{
|
||||
return "nvidia.prime";
|
||||
}
|
||||
|
||||
static bool nv_drm_gem_prime_fence_op_enable_signaling(nv_dma_fence_t *fence)
|
||||
{
|
||||
// DO NOTHING
|
||||
return true;
|
||||
}
|
||||
|
||||
static void nv_drm_gem_prime_fence_op_release(nv_dma_fence_t *fence)
|
||||
{
|
||||
struct nv_drm_prime_fence *nv_fence = to_nv_drm_prime_fence(fence);
|
||||
nv_drm_free(nv_fence);
|
||||
}
|
||||
|
||||
static signed long
|
||||
nv_drm_gem_prime_fence_op_wait(nv_dma_fence_t *fence,
|
||||
bool intr, signed long timeout)
|
||||
{
|
||||
/*
|
||||
* If the waiter requests to wait with no timeout, force a timeout to ensure
|
||||
* that it won't get stuck forever in the kernel if something were to go
|
||||
* wrong with signaling, such as a malicious userspace not releasing the
|
||||
* semaphore.
|
||||
*
|
||||
* 96 ms (roughly 6 frames @ 60 Hz) is arbitrarily chosen to be long enough
|
||||
* that it should never get hit during normal operation, but not so long
|
||||
* that the system becomes unresponsive.
|
||||
*/
|
||||
return nv_dma_fence_default_wait(fence, intr,
|
||||
(timeout == MAX_SCHEDULE_TIMEOUT) ?
|
||||
msecs_to_jiffies(96) : timeout);
|
||||
}
|
||||
|
||||
static const nv_dma_fence_ops_t nv_drm_gem_prime_fence_ops = {
|
||||
.get_driver_name = nv_drm_gem_prime_fence_op_get_driver_name,
|
||||
.get_timeline_name = nv_drm_gem_prime_fence_op_get_timeline_name,
|
||||
.enable_signaling = nv_drm_gem_prime_fence_op_enable_signaling,
|
||||
.release = nv_drm_gem_prime_fence_op_release,
|
||||
.wait = nv_drm_gem_prime_fence_op_wait,
|
||||
};
|
||||
|
||||
static inline void
|
||||
__nv_drm_prime_fence_signal(struct nv_drm_prime_fence *nv_fence)
|
||||
{
|
||||
list_del(&nv_fence->list_entry);
|
||||
nv_dma_fence_signal(&nv_fence->base);
|
||||
nv_dma_fence_put(&nv_fence->base);
|
||||
}
|
||||
|
||||
static void nv_drm_gem_prime_force_fence_signal(
|
||||
struct nv_drm_fence_context *nv_fence_context)
|
||||
{
|
||||
WARN_ON(!spin_is_locked(&nv_fence_context->lock));
|
||||
|
||||
while (!list_empty(&nv_fence_context->pending)) {
|
||||
struct nv_drm_prime_fence *nv_fence = list_first_entry(
|
||||
&nv_fence_context->pending,
|
||||
typeof(*nv_fence),
|
||||
list_entry);
|
||||
|
||||
__nv_drm_prime_fence_signal(nv_fence);
|
||||
}
|
||||
}

static void nv_drm_gem_prime_fence_event
(
    void *dataPtr,
    NvU32 dataU32
)
{
    struct nv_drm_fence_context *nv_fence_context = dataPtr;

    spin_lock(&nv_fence_context->lock);

    while (!list_empty(&nv_fence_context->pending)) {
        struct nv_drm_prime_fence *nv_fence = list_first_entry(
            &nv_fence_context->pending,
            typeof(*nv_fence),
            list_entry);

        /* Index into the surface with a 16-byte stride */
        unsigned int seqno = *((nv_fence_context->pLinearAddress) +
                               (nv_fence_context->fenceSemIndex * 4));

        if (nv_fence->base.seqno > seqno) {
            /*
             * Fences in the list are placed in increasing order of
             * sequence number; break out of the loop once the first
             * fence that is not ready to signal is found.
             */
            break;
        }

        __nv_drm_prime_fence_signal(nv_fence);
    }

    spin_unlock(&nv_fence_context->lock);
}
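/*
 * The 16-byte stride above comes from pLinearAddress being an NvU32
 * pointer: fenceSemIndex * 4 advances by four 32-bit words, i.e. one
 * 16-byte semaphore slot per fence context index. Only the first word of
 * each slot (the current sequence number) is read here.
 */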

static inline struct nv_drm_fence_context *__nv_drm_fence_context_new(
    struct nv_drm_device *nv_dev,
    struct drm_nvidia_fence_context_create_params *p)
{
    struct nv_drm_fence_context *nv_fence_context;
    struct NvKmsKapiMemory *pSemSurface;
    NvU32 *pLinearAddress;

    /* Allocate backup nvkms resources */

    pSemSurface = nvKms->importMemory(nv_dev->pDevice,
                                      p->size,
                                      p->import_mem_nvkms_params_ptr,
                                      p->import_mem_nvkms_params_size);
    if (!pSemSurface) {
        NV_DRM_DEV_LOG_ERR(
            nv_dev,
            "Failed to import fence semaphore surface");

        goto failed;
    }

    if (!nvKms->mapMemory(nv_dev->pDevice,
                          pSemSurface,
                          NVKMS_KAPI_MAPPING_TYPE_KERNEL,
                          (void **) &pLinearAddress)) {
        NV_DRM_DEV_LOG_ERR(
            nv_dev,
            "Failed to map fence semaphore surface");

        goto failed_to_map_memory;
    }

    /*
     * Allocate a fence context object, initialize it, and allocate a
     * channel event for it.
     */

    if ((nv_fence_context = nv_drm_calloc(
                1,
                sizeof(*nv_fence_context))) == NULL) {
        goto failed_alloc_fence_context;
    }

    /*
     * nv_dma_fence_context_alloc() cannot fail, so we do not need
     * to check a return value.
     */

    *nv_fence_context = (struct nv_drm_fence_context) {
        .nv_dev = nv_dev,
        .context = nv_dma_fence_context_alloc(1),
        .pSemSurface = pSemSurface,
        .pLinearAddress = pLinearAddress,
        .fenceSemIndex = p->index,
    };

    INIT_LIST_HEAD(&nv_fence_context->pending);

    spin_lock_init(&nv_fence_context->lock);

    /*
     * Except for 'cb', the fence context should be completely initialized
     * before channel event allocation because the fence context may start
     * receiving events immediately after allocation.
     *
     * There is no simultaneous read/write access to 'cb', therefore it does
     * not require spin-lock protection.
     */
    nv_fence_context->cb =
        nvKms->allocateChannelEvent(nv_dev->pDevice,
                                    nv_drm_gem_prime_fence_event,
                                    nv_fence_context,
                                    p->event_nvkms_params_ptr,
                                    p->event_nvkms_params_size);
    if (!nv_fence_context->cb) {
        NV_DRM_DEV_LOG_ERR(nv_dev,
                           "Failed to allocate fence signaling event");
        goto failed_to_allocate_channel_event;
    }

    return nv_fence_context;

failed_to_allocate_channel_event:
    nv_drm_free(nv_fence_context);

failed_alloc_fence_context:

    nvKms->unmapMemory(nv_dev->pDevice,
                       pSemSurface,
                       NVKMS_KAPI_MAPPING_TYPE_KERNEL,
                       (void *) pLinearAddress);

failed_to_map_memory:
    nvKms->freeMemory(nv_dev->pDevice, pSemSurface);

failed:
    return NULL;
}
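/*
 * The error labels above unwind in reverse order of acquisition: a failed
 * channel-event allocation frees the fence context, then falls through to
 * unmap and free the semaphore surface imported earlier in the function.
 */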

static void __nv_drm_fence_context_destroy(
    struct nv_drm_fence_context *nv_fence_context)
{
    struct nv_drm_device *nv_dev = nv_fence_context->nv_dev;

    /*
     * Free the channel event before destroying the fence context, otherwise
     * the event callback may continue to be called.
     */
    nvKms->freeChannelEvent(nv_dev->pDevice, nv_fence_context->cb);

    /* Force-signal all pending fences and empty the pending list */
    spin_lock(&nv_fence_context->lock);

    nv_drm_gem_prime_force_fence_signal(nv_fence_context);

    spin_unlock(&nv_fence_context->lock);

    /* Free nvkms resources */

    nvKms->unmapMemory(nv_dev->pDevice,
                       nv_fence_context->pSemSurface,
                       NVKMS_KAPI_MAPPING_TYPE_KERNEL,
                       (void *) nv_fence_context->pLinearAddress);

    nvKms->freeMemory(nv_dev->pDevice, nv_fence_context->pSemSurface);

    nv_drm_free(nv_fence_context);
}

static nv_dma_fence_t *__nv_drm_fence_context_create_fence(
    struct nv_drm_fence_context *nv_fence_context,
    unsigned int seqno)
{
    struct nv_drm_prime_fence *nv_fence;
    int ret = 0;

    if ((nv_fence = nv_drm_calloc(1, sizeof(*nv_fence))) == NULL) {
        ret = -ENOMEM;
        goto out;
    }

    spin_lock(&nv_fence_context->lock);

    /*
     * If seqno wrapped, force signal fences to make sure none of them
     * get stuck.
     */
    if (seqno < nv_fence_context->last_seqno) {
        nv_drm_gem_prime_force_fence_signal(nv_fence_context);
    }

    INIT_LIST_HEAD(&nv_fence->list_entry);

    spin_lock_init(&nv_fence->lock);

    nv_dma_fence_init(&nv_fence->base, &nv_drm_gem_prime_fence_ops,
                      &nv_fence->lock, nv_fence_context->context,
                      seqno);

    list_add_tail(&nv_fence->list_entry, &nv_fence_context->pending);

    nv_fence_context->last_seqno = seqno;

    spin_unlock(&nv_fence_context->lock);

out:
    return ret != 0 ? ERR_PTR(ret) : &nv_fence->base;
}
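/*
 * The wraparound handling above appears deliberately conservative: because
 * pending fences are kept in increasing seqno order, a new fence with a
 * smaller seqno than last_seqno can only mean the 32-bit counter wrapped,
 * so every fence still pending is force-signaled rather than left to
 * compare against post-wrap semaphore values.
 */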

int nv_drm_fence_supported_ioctl(struct drm_device *dev,
                                 void *data, struct drm_file *filep)
{
    struct nv_drm_device *nv_dev = to_nv_device(dev);
    return nv_dev->pDevice ? 0 : -EINVAL;
}

struct nv_drm_gem_fence_context {
    struct nv_drm_gem_object base;
    struct nv_drm_fence_context *nv_fence_context;
};

static inline struct nv_drm_gem_fence_context *to_gem_fence_context(
    struct nv_drm_gem_object *nv_gem)
{
    if (nv_gem != NULL) {
        return container_of(nv_gem, struct nv_drm_gem_fence_context, base);
    }

    return NULL;
}

/*
 * Teardown of the 'struct nv_drm_gem_fence_context' object is not expected
 * to happen from any worker thread; if it does, it causes a deadlock,
 * because the teardown sequence flushes all existing worker threads.
 */
static void __nv_drm_gem_fence_context_free(struct nv_drm_gem_object *nv_gem)
{
    struct nv_drm_gem_fence_context *nv_gem_fence_context =
        to_gem_fence_context(nv_gem);

    __nv_drm_fence_context_destroy(nv_gem_fence_context->nv_fence_context);

    nv_drm_free(nv_gem_fence_context);
}

const struct nv_drm_gem_object_funcs nv_gem_fence_context_ops = {
    .free = __nv_drm_gem_fence_context_free,
};

static inline
struct nv_drm_gem_fence_context *__nv_drm_gem_object_fence_context_lookup(
    struct drm_device *dev,
    struct drm_file *filp,
    u32 handle)
{
    struct nv_drm_gem_object *nv_gem =
        nv_drm_gem_object_lookup(dev, filp, handle);

    if (nv_gem != NULL && nv_gem->ops != &nv_gem_fence_context_ops) {
        nv_drm_gem_object_unreference_unlocked(nv_gem);
        return NULL;
    }

    return to_gem_fence_context(nv_gem);
}

int nv_drm_fence_context_create_ioctl(struct drm_device *dev,
                                      void *data, struct drm_file *filep)
{
    struct nv_drm_device *nv_dev = to_nv_device(dev);
    struct drm_nvidia_fence_context_create_params *p = data;
    struct nv_drm_gem_fence_context *nv_gem_fence_context = NULL;

    if ((nv_gem_fence_context = nv_drm_calloc(
                1,
                sizeof(struct nv_drm_gem_fence_context))) == NULL) {
        goto done;
    }

    if ((nv_gem_fence_context->nv_fence_context =
            __nv_drm_fence_context_new(nv_dev, p)) == NULL) {
        goto fence_context_new_failed;
    }

    nv_drm_gem_object_init(nv_dev,
                           &nv_gem_fence_context->base,
                           &nv_gem_fence_context_ops,
                           0 /* size */,
                           NULL /* pMemory */);

    return nv_drm_gem_handle_create_drop_reference(filep,
                                                   &nv_gem_fence_context->base,
                                                   &p->handle);

fence_context_new_failed:
    nv_drm_free(nv_gem_fence_context);

done:
    return -ENOMEM;
}

int nv_drm_gem_fence_attach_ioctl(struct drm_device *dev,
                                  void *data, struct drm_file *filep)
{
    int ret = -EINVAL;
    struct nv_drm_device *nv_dev = to_nv_device(dev);
    struct drm_nvidia_gem_fence_attach_params *p = data;

    struct nv_drm_gem_object *nv_gem;
    struct nv_drm_gem_fence_context *nv_gem_fence_context;

    nv_dma_fence_t *fence;

    nv_gem = nv_drm_gem_object_lookup(nv_dev->dev, filep, p->handle);

    if (!nv_gem) {
        NV_DRM_DEV_LOG_ERR(
            nv_dev,
            "Failed to lookup gem object for fence attach: 0x%08x",
            p->handle);

        goto done;
    }

    if ((nv_gem_fence_context = __nv_drm_gem_object_fence_context_lookup(
                nv_dev->dev,
                filep,
                p->fence_context_handle)) == NULL) {

        NV_DRM_DEV_LOG_ERR(
            nv_dev,
            "Failed to lookup gem object for fence context: 0x%08x",
            p->fence_context_handle);

        goto fence_context_lookup_failed;
    }

    if (IS_ERR(fence = __nv_drm_fence_context_create_fence(
                nv_gem_fence_context->nv_fence_context,
                p->sem_thresh))) {
        ret = PTR_ERR(fence);

        NV_DRM_DEV_LOG_ERR(
            nv_dev,
            "Failed to allocate fence: 0x%08x", p->handle);

        goto fence_context_create_fence_failed;
    }

    nv_dma_resv_add_excl_fence(&nv_gem->resv, fence);

    ret = 0;

fence_context_create_fence_failed:
    nv_drm_gem_object_unreference_unlocked(&nv_gem_fence_context->base);

fence_context_lookup_failed:
    nv_drm_gem_object_unreference_unlocked(nv_gem);

done:
    return ret;
}
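/*
 * The newly created fence is attached as the exclusive (write) fence of the
 * GEM object's reservation, so implicit-sync consumers of the object wait
 * until the backing semaphore reaches p->sem_thresh.
 */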

#endif /* NV_DRM_FENCE_AVAILABLE */

#endif /* NV_DRM_AVAILABLE */
@@ -1,896 +0,0 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include <linux/dma-buf.h>
#include "nv-dmabuf.h"

#if defined(CONFIG_DMA_SHARED_BUFFER)
typedef struct nv_dma_buf_mem_handle
{
    NvHandle h_memory;
    NvU64    offset;
    NvU64    size;
    NvU64    bar1_va;
} nv_dma_buf_mem_handle_t;

typedef struct nv_dma_buf_file_private
{
    nv_state_t              *nv;
    NvHandle                 h_client;
    NvHandle                 h_device;
    NvHandle                 h_subdevice;
    NvU32                    total_objects;
    NvU32                    num_objects;
    NvU64                    total_size;
    NvU64                    attached_size;
    struct mutex             lock;
    nv_dma_buf_mem_handle_t *handles;
    NvU64                    bar1_va_ref_count;
    void                    *mig_info;
} nv_dma_buf_file_private_t;

static void
nv_dma_buf_free_file_private(
    nv_dma_buf_file_private_t *priv
)
{
    if (priv == NULL)
    {
        return;
    }

    if (priv->handles != NULL)
    {
        NV_KFREE(priv->handles, priv->total_objects * sizeof(priv->handles[0]));
        priv->handles = NULL;
    }

    mutex_destroy(&priv->lock);

    NV_KFREE(priv, sizeof(nv_dma_buf_file_private_t));
}

static nv_dma_buf_file_private_t*
nv_dma_buf_alloc_file_private(
    NvU32 num_handles
)
{
    nv_dma_buf_file_private_t *priv = NULL;

    NV_KMALLOC(priv, sizeof(nv_dma_buf_file_private_t));
    if (priv == NULL)
    {
        return NULL;
    }

    memset(priv, 0, sizeof(nv_dma_buf_file_private_t));

    mutex_init(&priv->lock);

    NV_KMALLOC(priv->handles, num_handles * sizeof(priv->handles[0]));
    if (priv->handles == NULL)
    {
        goto failed;
    }

    memset(priv->handles, 0, num_handles * sizeof(priv->handles[0]));

    return priv;

failed:
    nv_dma_buf_free_file_private(priv);

    return NULL;
}

// Must be called with RMAPI lock and GPU lock taken
static void
nv_dma_buf_undup_mem_handles_unlocked(
    nvidia_stack_t *sp,
    NvU32           index,
    NvU32           num_objects,
    nv_dma_buf_file_private_t *priv
)
{
    NvU32 i = 0;

    for (i = index; i < num_objects; i++)
    {
        if (priv->handles[i].h_memory == 0)
        {
            continue;
        }

        rm_dma_buf_undup_mem_handle(sp, priv->nv, priv->h_client,
                                    priv->handles[i].h_memory);

        priv->attached_size -= priv->handles[i].size;
        priv->handles[i].h_memory = 0;
        priv->handles[i].offset = 0;
        priv->handles[i].size = 0;
        priv->num_objects--;
    }
}

static void
nv_dma_buf_undup_mem_handles(
    nvidia_stack_t *sp,
    NvU32           index,
    NvU32           num_objects,
    nv_dma_buf_file_private_t *priv
)
{
    NV_STATUS status;

    status = rm_acquire_api_lock(sp);
    if (WARN_ON(status != NV_OK))
    {
        return;
    }

    status = rm_acquire_all_gpus_lock(sp);
    if (WARN_ON(status != NV_OK))
    {
        goto unlock_api_lock;
    }

    nv_dma_buf_undup_mem_handles_unlocked(sp, index, num_objects, priv);

    rm_release_all_gpus_lock(sp);

unlock_api_lock:
    rm_release_api_lock(sp);
}
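//
// Lock ordering note: the RMAPI lock is always taken before the GPU lock(s)
// and released after them, both here and in the dup/map/unmap paths below.
//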

static NV_STATUS
nv_dma_buf_dup_mem_handles(
    nvidia_stack_t *sp,
    nv_dma_buf_file_private_t *priv,
    nv_ioctl_export_to_dma_buf_fd_t *params
)
{
    NV_STATUS status = NV_OK;
    NvU32 index = params->index;
    NvU32 count = 0;
    NvU32 i = 0;

    status = rm_acquire_api_lock(sp);
    if (status != NV_OK)
    {
        return status;
    }

    status = rm_acquire_gpu_lock(sp, priv->nv);
    if (status != NV_OK)
    {
        goto unlock_api_lock;
    }

    for (i = 0; i < params->numObjects; i++)
    {
        NvHandle h_memory_duped = 0;

        if (priv->handles[index].h_memory != 0)
        {
            status = NV_ERR_IN_USE;
            goto failed;
        }

        if (params->sizes[i] > priv->total_size - priv->attached_size)
        {
            status = NV_ERR_INVALID_ARGUMENT;
            goto failed;
        }

        status = rm_dma_buf_dup_mem_handle(sp, priv->nv,
                                           params->hClient,
                                           priv->h_client,
                                           priv->h_device,
                                           priv->h_subdevice,
                                           priv->mig_info,
                                           params->handles[i],
                                           params->offsets[i],
                                           params->sizes[i],
                                           &h_memory_duped);
        if (status != NV_OK)
        {
            goto failed;
        }

        priv->attached_size += params->sizes[i];
        priv->handles[index].h_memory = h_memory_duped;
        priv->handles[index].offset = params->offsets[i];
        priv->handles[index].size = params->sizes[i];
        priv->num_objects++;
        index++;
        count++;
    }

    if ((priv->num_objects == priv->total_objects) &&
        (priv->attached_size != priv->total_size))
    {
        status = NV_ERR_INVALID_ARGUMENT;
        goto failed;
    }

    rm_release_gpu_lock(sp, priv->nv);

    rm_release_api_lock(sp);

    return NV_OK;

failed:
    nv_dma_buf_undup_mem_handles_unlocked(sp, params->index, count, priv);

    rm_release_gpu_lock(sp, priv->nv);

unlock_api_lock:
    rm_release_api_lock(sp);

    return status;
}
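//
// Size bookkeeping: each dup adds to attached_size, and the final check
// requires that once all total_objects handles are attached, their sizes
// sum exactly to the total_size declared when the dma-buf was created;
// otherwise the whole batch is undup'd via the failed path.
//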

// Must be called with RMAPI lock and GPU lock taken
static void
nv_dma_buf_unmap_unlocked(
    nvidia_stack_t *sp,
    nv_dma_device_t *peer_dma_dev,
    nv_dma_buf_file_private_t *priv,
    struct sg_table *sgt,
    NvU32 count
)
{
    NV_STATUS status;
    NvU32 i;
    NvU64 dma_len;
    NvU64 dma_addr;
    NvU64 bar1_va;
    NvBool bar1_unmap_needed;
    struct scatterlist *sg = NULL;

    bar1_unmap_needed = (priv->bar1_va_ref_count == 0);

    for_each_sg(sgt->sgl, sg, count, i)
    {
        dma_addr = sg_dma_address(sg);
        dma_len = priv->handles[i].size;
        bar1_va = priv->handles[i].bar1_va;

        WARN_ON(sg_dma_len(sg) != priv->handles[i].size);

        nv_dma_unmap_peer(peer_dma_dev, (dma_len / os_page_size), dma_addr);

        if (bar1_unmap_needed)
        {
            status = rm_dma_buf_unmap_mem_handle(sp, priv->nv, priv->h_client,
                                                 priv->handles[i].h_memory,
                                                 priv->handles[i].size,
                                                 priv->handles[i].bar1_va);
            WARN_ON(status != NV_OK);
        }
    }
}

static struct sg_table*
nv_dma_buf_map(
    struct dma_buf_attachment *attachment,
    enum dma_data_direction direction
)
{
    NV_STATUS status;
    nvidia_stack_t *sp = NULL;
    struct scatterlist *sg = NULL;
    struct sg_table *sgt = NULL;
    struct dma_buf *buf = attachment->dmabuf;
    struct device *dev = attachment->dev;
    nv_dma_buf_file_private_t *priv = buf->priv;
    nv_dma_device_t peer_dma_dev = {{ 0 }};
    NvBool bar1_map_needed;
    NvBool bar1_unmap_needed;
    NvU32 count = 0;
    NvU32 i = 0;
    int rc = 0;

    //
    // We support importers that are able to handle MMIO resources
    // not backed by struct page. This will need to be revisited
    // when dma-buf support for P9 is added.
    //
#if defined(NV_DMA_BUF_HAS_DYNAMIC_ATTACHMENT) && \
    defined(NV_DMA_BUF_ATTACHMENT_HAS_PEER2PEER)
    if (dma_buf_attachment_is_dynamic(attachment) &&
        !attachment->peer2peer)
    {
        nv_printf(NV_DBG_ERRORS,
                  "NVRM: failed to map dynamic attachment with no P2P support\n");
        return NULL;
    }
#endif

    mutex_lock(&priv->lock);

    if (priv->num_objects != priv->total_objects)
    {
        goto unlock_priv;
    }

    rc = nv_kmem_cache_alloc_stack(&sp);
    if (rc != 0)
    {
        goto unlock_priv;
    }

    status = rm_acquire_api_lock(sp);
    if (status != NV_OK)
    {
        goto free_sp;
    }

    status = rm_acquire_gpu_lock(sp, priv->nv);
    if (status != NV_OK)
    {
        goto unlock_api_lock;
    }

    NV_KMALLOC(sgt, sizeof(struct sg_table));
    if (sgt == NULL)
    {
        goto unlock_gpu_lock;
    }

    memset(sgt, 0, sizeof(struct sg_table));

    //
    // RM currently returns contiguous BAR1, so we create as many
    // sg entries as the number of handles being mapped.
    // When RM can alloc discontiguous BAR1, this code will need to be revisited.
    //
    rc = sg_alloc_table(sgt, priv->num_objects, GFP_KERNEL);
    if (rc != 0)
    {
        goto free_sgt;
    }

    peer_dma_dev.dev = dev;
    peer_dma_dev.addressable_range.limit = (NvU64)dev->dma_mask;
    bar1_map_needed = bar1_unmap_needed = (priv->bar1_va_ref_count == 0);

    for_each_sg(sgt->sgl, sg, priv->num_objects, i)
    {
        NvU64 dma_addr;
        NvU64 dma_len;

        if (bar1_map_needed)
        {
            status = rm_dma_buf_map_mem_handle(sp, priv->nv, priv->h_client,
                                               priv->handles[i].h_memory,
                                               priv->handles[i].offset,
                                               priv->handles[i].size,
                                               &priv->handles[i].bar1_va);
            if (status != NV_OK)
            {
                goto unmap_handles;
            }
        }

        dma_addr = priv->handles[i].bar1_va;
        dma_len = priv->handles[i].size;

        status = nv_dma_map_peer(&peer_dma_dev, priv->nv->dma_dev,
                                 0x1, (dma_len / os_page_size), &dma_addr);
        if (status != NV_OK)
        {
            if (bar1_unmap_needed)
            {
                // Unmap the recently mapped memory handle
                (void) rm_dma_buf_unmap_mem_handle(sp, priv->nv, priv->h_client,
                                                   priv->handles[i].h_memory,
                                                   priv->handles[i].size,
                                                   priv->handles[i].bar1_va);
            }

            // Unmap remaining memory handles
            goto unmap_handles;
        }

        sg_set_page(sg, NULL, dma_len, 0);
        sg_dma_address(sg) = (dma_addr_t)dma_addr;
        sg_dma_len(sg) = dma_len;
        count++;
    }

    priv->bar1_va_ref_count++;

    rm_release_gpu_lock(sp, priv->nv);

    rm_release_api_lock(sp);

    nv_kmem_cache_free_stack(sp);

    mutex_unlock(&priv->lock);

    return sgt;

unmap_handles:
    nv_dma_buf_unmap_unlocked(sp, &peer_dma_dev, priv, sgt, count);

    sg_free_table(sgt);

free_sgt:
    NV_KFREE(sgt, sizeof(struct sg_table));

unlock_gpu_lock:
    rm_release_gpu_lock(sp, priv->nv);

unlock_api_lock:
    rm_release_api_lock(sp);

free_sp:
    nv_kmem_cache_free_stack(sp);

unlock_priv:
    mutex_unlock(&priv->lock);

    return NULL;
}
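//
// bar1_va_ref_count tracks how many attachments share the BAR1 mappings:
// only the first successful map creates them (bar1_map_needed) and only
// the unmap that drops the count back to zero tears them down; later
// mappers reuse the cached handles[i].bar1_va addresses.
//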

static void
nv_dma_buf_unmap(
    struct dma_buf_attachment *attachment,
    struct sg_table *sgt,
    enum dma_data_direction direction
)
{
    NV_STATUS status;
    struct dma_buf *buf = attachment->dmabuf;
    struct device *dev = attachment->dev;
    nvidia_stack_t *sp = NULL;
    nv_dma_buf_file_private_t *priv = buf->priv;
    nv_dma_device_t peer_dma_dev = {{ 0 }};
    int rc = 0;

    mutex_lock(&priv->lock);

    if (priv->num_objects != priv->total_objects)
    {
        goto unlock_priv;
    }

    rc = nv_kmem_cache_alloc_stack(&sp);
    if (WARN_ON(rc != 0))
    {
        goto unlock_priv;
    }

    status = rm_acquire_api_lock(sp);
    if (WARN_ON(status != NV_OK))
    {
        goto free_sp;
    }

    status = rm_acquire_gpu_lock(sp, priv->nv);
    if (WARN_ON(status != NV_OK))
    {
        goto unlock_api_lock;
    }

    peer_dma_dev.dev = dev;
    peer_dma_dev.addressable_range.limit = (NvU64)dev->dma_mask;

    priv->bar1_va_ref_count--;

    nv_dma_buf_unmap_unlocked(sp, &peer_dma_dev, priv, sgt, priv->num_objects);

    sg_free_table(sgt);

    NV_KFREE(sgt, sizeof(struct sg_table));

    rm_release_gpu_lock(sp, priv->nv);

unlock_api_lock:
    rm_release_api_lock(sp);

free_sp:
    nv_kmem_cache_free_stack(sp);

unlock_priv:
    mutex_unlock(&priv->lock);
}

static void
nv_dma_buf_release(
    struct dma_buf *buf
)
{
    int rc = 0;
    nvidia_stack_t *sp = NULL;
    nv_dma_buf_file_private_t *priv = buf->priv;
    nv_state_t *nv;

    if (priv == NULL)
    {
        return;
    }

    nv = priv->nv;

    rc = nv_kmem_cache_alloc_stack(&sp);
    if (WARN_ON(rc != 0))
    {
        return;
    }

    nv_dma_buf_undup_mem_handles(sp, 0, priv->num_objects, priv);

    rm_dma_buf_put_client_and_device(sp, priv->nv, priv->h_client, priv->h_device,
                                     priv->h_subdevice, priv->mig_info);

    nv_dma_buf_free_file_private(priv);
    buf->priv = NULL;

    nvidia_dev_put(nv->gpu_id, sp);

    nv_kmem_cache_free_stack(sp);

    return;
}

static int
nv_dma_buf_mmap(
    struct dma_buf *buf,
    struct vm_area_struct *vma
)
{
    return -ENOTSUPP;
}

#if defined(NV_DMA_BUF_OPS_HAS_KMAP) || \
    defined(NV_DMA_BUF_OPS_HAS_MAP)
static void*
nv_dma_buf_kmap_stub(
    struct dma_buf *buf,
    unsigned long page_num
)
{
    return NULL;
}

static void
nv_dma_buf_kunmap_stub(
    struct dma_buf *buf,
    unsigned long page_num,
    void *addr
)
{
    return;
}
#endif

#if defined(NV_DMA_BUF_OPS_HAS_KMAP_ATOMIC) || \
    defined(NV_DMA_BUF_OPS_HAS_MAP_ATOMIC)
static void*
nv_dma_buf_kmap_atomic_stub(
    struct dma_buf *buf,
    unsigned long page_num
)
{
    return NULL;
}

static void
nv_dma_buf_kunmap_atomic_stub(
    struct dma_buf *buf,
    unsigned long page_num,
    void *addr
)
{
    return;
}
#endif

//
// Note: Some of the dma-buf operations are mandatory in some kernels.
// So stubs are added to prevent dma_buf_export() failure.
// The actual implementations of these interfaces are not really required
// for the export operation to work.
//
// The same functions are used for kmap*/map* because of this commit:
// f9b67f0014cb: dma-buf: Rename dma-ops to prevent conflict with kunmap_atomic
//
static const struct dma_buf_ops nv_dma_buf_ops = {
    .map_dma_buf   = nv_dma_buf_map,
    .unmap_dma_buf = nv_dma_buf_unmap,
    .release       = nv_dma_buf_release,
    .mmap          = nv_dma_buf_mmap,
#if defined(NV_DMA_BUF_OPS_HAS_KMAP)
    .kmap          = nv_dma_buf_kmap_stub,
    .kunmap        = nv_dma_buf_kunmap_stub,
#endif
#if defined(NV_DMA_BUF_OPS_HAS_KMAP_ATOMIC)
    .kmap_atomic   = nv_dma_buf_kmap_atomic_stub,
    .kunmap_atomic = nv_dma_buf_kunmap_atomic_stub,
#endif
#if defined(NV_DMA_BUF_OPS_HAS_MAP)
    .map           = nv_dma_buf_kmap_stub,
    .unmap         = nv_dma_buf_kunmap_stub,
#endif
#if defined(NV_DMA_BUF_OPS_HAS_MAP_ATOMIC)
    .map_atomic    = nv_dma_buf_kmap_atomic_stub,
    .unmap_atomic  = nv_dma_buf_kunmap_atomic_stub,
#endif
};

static NV_STATUS
nv_dma_buf_create(
    nv_state_t *nv,
    nv_ioctl_export_to_dma_buf_fd_t *params
)
{
    int rc = 0;
    NV_STATUS status;
    nvidia_stack_t *sp = NULL;
    struct dma_buf *buf = NULL;
    nv_dma_buf_file_private_t *priv = NULL;
    NvU32 gpu_id = nv->gpu_id;

    if (!nv->dma_buf_supported)
    {
        return NV_ERR_NOT_SUPPORTED;
    }

    if (params->index > (params->totalObjects - params->numObjects))
    {
        return NV_ERR_INVALID_ARGUMENT;
    }

    priv = nv_dma_buf_alloc_file_private(params->totalObjects);
    if (priv == NULL)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: failed to allocate dma-buf private\n");
        return NV_ERR_NO_MEMORY;
    }

    priv->total_objects = params->totalObjects;
    priv->total_size    = params->totalSize;
    priv->nv            = nv;

    rc = nv_kmem_cache_alloc_stack(&sp);
    if (rc != 0)
    {
        status = NV_ERR_NO_MEMORY;
        goto cleanup_priv;
    }

    rc = nvidia_dev_get(gpu_id, sp);
    if (rc != 0)
    {
        status = NV_ERR_OPERATING_SYSTEM;
        goto cleanup_sp;
    }

    status = rm_dma_buf_get_client_and_device(sp, priv->nv,
                                              params->hClient,
                                              &priv->h_client,
                                              &priv->h_device,
                                              &priv->h_subdevice,
                                              &priv->mig_info);
    if (status != NV_OK)
    {
        goto cleanup_device;
    }

    status = nv_dma_buf_dup_mem_handles(sp, priv, params);
    if (status != NV_OK)
    {
        goto cleanup_client_and_device;
    }

#if (NV_DMA_BUF_EXPORT_ARGUMENT_COUNT == 1)
    {
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

        exp_info.ops   = &nv_dma_buf_ops;
        exp_info.size  = params->totalSize;
        exp_info.flags = O_RDWR | O_CLOEXEC;
        exp_info.priv  = priv;

        buf = dma_buf_export(&exp_info);
    }
#elif (NV_DMA_BUF_EXPORT_ARGUMENT_COUNT == 4)
    buf = dma_buf_export(priv, &nv_dma_buf_ops,
                         params->totalSize, O_RDWR | O_CLOEXEC);
#elif (NV_DMA_BUF_EXPORT_ARGUMENT_COUNT == 5)
    buf = dma_buf_export(priv, &nv_dma_buf_ops,
                         params->totalSize, O_RDWR | O_CLOEXEC, NULL);
#endif

    if (IS_ERR(buf))
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: failed to create dma-buf\n");

        status = NV_ERR_OPERATING_SYSTEM;

        goto cleanup_handles;
    }

    nv_kmem_cache_free_stack(sp);

    rc = dma_buf_fd(buf, O_RDWR | O_CLOEXEC);
    if (rc < 0)
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: failed to get dma-buf file descriptor\n");

        //
        // Since the dma-buf was successfully created, clean-up of the dup'd
        // handles is done by the release callback when the last reference
        // is dropped.
        //
        dma_buf_put(buf);

        return NV_ERR_OPERATING_SYSTEM;
    }

    params->fd = rc;

    return NV_OK;

cleanup_handles:
    nv_dma_buf_undup_mem_handles(sp, 0, priv->num_objects, priv);

cleanup_client_and_device:
    rm_dma_buf_put_client_and_device(sp, priv->nv, priv->h_client, priv->h_device,
                                     priv->h_subdevice, priv->mig_info);

cleanup_device:
    nvidia_dev_put(gpu_id, sp);

cleanup_sp:
    nv_kmem_cache_free_stack(sp);

cleanup_priv:
    nv_dma_buf_free_file_private(priv);

    return status;
}

static NV_STATUS
nv_dma_buf_reuse(
    nv_state_t *nv,
    nv_ioctl_export_to_dma_buf_fd_t *params
)
{
    int rc = 0;
    NV_STATUS status = NV_OK;
    nvidia_stack_t *sp = NULL;
    struct dma_buf *buf = NULL;
    nv_dma_buf_file_private_t *priv = NULL;

    buf = dma_buf_get(params->fd);
    if (IS_ERR(buf))
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: failed to get dma-buf\n");
        return NV_ERR_OPERATING_SYSTEM;
    }

    priv = buf->priv;

    if (priv == NULL)
    {
        status = NV_ERR_OPERATING_SYSTEM;
        goto cleanup_dmabuf;
    }

    rc = mutex_lock_interruptible(&priv->lock);
    if (rc != 0)
    {
        status = NV_ERR_OPERATING_SYSTEM;
        goto cleanup_dmabuf;
    }

    if (params->index > (priv->total_objects - params->numObjects))
    {
        status = NV_ERR_INVALID_ARGUMENT;
        goto unlock_priv;
    }

    rc = nv_kmem_cache_alloc_stack(&sp);
    if (rc != 0)
    {
        status = NV_ERR_NO_MEMORY;
        goto unlock_priv;
    }

    status = nv_dma_buf_dup_mem_handles(sp, priv, params);
    if (status != NV_OK)
    {
        goto cleanup_sp;
    }

cleanup_sp:
    nv_kmem_cache_free_stack(sp);

unlock_priv:
    mutex_unlock(&priv->lock);

cleanup_dmabuf:
    dma_buf_put(buf);

    return status;
}
#endif // CONFIG_DMA_SHARED_BUFFER

NV_STATUS
nv_dma_buf_export(
    nv_state_t *nv,
    nv_ioctl_export_to_dma_buf_fd_t *params
)
{
#if defined(CONFIG_DMA_SHARED_BUFFER)
    NV_STATUS status;

    if ((params == NULL) ||
        (params->totalSize == 0) ||
        (params->numObjects == 0) ||
        (params->totalObjects == 0) ||
        (params->numObjects > NV_DMABUF_EXPORT_MAX_HANDLES) ||
        (params->numObjects > params->totalObjects))
    {
        return NV_ERR_INVALID_ARGUMENT;
    }

    //
    // If fd >= 0, a dma-buf already exists with this fd, so get the dma-buf
    // from the fd. If fd == -1, the dma-buf is not created yet, so create it
    // and then store additional handles.
    //
    if (params->fd == -1)
    {
        status = nv_dma_buf_create(nv, params);
    }
    else if (params->fd >= 0)
    {
        status = nv_dma_buf_reuse(nv, params);
    }
    else
    {
        status = NV_ERR_INVALID_ARGUMENT;
    }

    return status;
#else
    return NV_ERR_NOT_SUPPORTED;
#endif // CONFIG_DMA_SHARED_BUFFER
}
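//
// A minimal sketch of the expected ioctl flow (hypothetical caller; field
// names are from nv_ioctl_export_to_dma_buf_fd_t): the first call passes
// fd == -1 with up to NV_DMABUF_EXPORT_MAX_HANDLES handles and receives a
// new fd; follow-up calls pass that fd and a larger 'index' to attach the
// remaining handles until totalObjects handles have been stored.
//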
@@ -1,412 +0,0 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2012-2013 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "os-interface.h"
#include "nv-linux.h"
#include "nv-reg.h"
#include "nv-frontend.h"

#if defined(MODULE_LICENSE)
MODULE_LICENSE("Dual MIT/GPL");
#endif
#if defined(MODULE_INFO)
MODULE_INFO(supported, "external");
#endif
#if defined(MODULE_VERSION)
MODULE_VERSION(NV_VERSION_STRING);
#endif

#ifdef MODULE_ALIAS_CHARDEV_MAJOR
MODULE_ALIAS_CHARDEV_MAJOR(NV_MAJOR_DEVICE_NUMBER);
#endif

/*
 * MODULE_IMPORT_NS() is added by commit id 8651ec01daeda
 * ("module: add support for symbol namespaces") in 5.4
 */
#if defined(MODULE_IMPORT_NS)

/*
 * DMA_BUF namespace is added by commit id 16b0314aa746
 * ("dma-buf: move dma-buf symbols into the DMA_BUF module namespace") in 5.16
 */
MODULE_IMPORT_NS(DMA_BUF);

#endif

static NvU32 nv_num_instances;

// lock required to protect table.
struct semaphore nv_module_table_lock;

// minor number table
nvidia_module_t *nv_minor_num_table[NV_FRONTEND_CONTROL_DEVICE_MINOR_MAX + 1];

int nvidia_init_module(void);
void nvidia_exit_module(void);

/* EXPORTS to Linux Kernel */

int nvidia_frontend_open(struct inode *, struct file *);
int nvidia_frontend_close(struct inode *, struct file *);
unsigned int nvidia_frontend_poll(struct file *, poll_table *);
int nvidia_frontend_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
long nvidia_frontend_unlocked_ioctl(struct file *, unsigned int, unsigned long);
long nvidia_frontend_compat_ioctl(struct file *, unsigned int, unsigned long);
int nvidia_frontend_mmap(struct file *, struct vm_area_struct *);

/* character driver entry points */
static struct file_operations nv_frontend_fops = {
    .owner          = THIS_MODULE,
    .poll           = nvidia_frontend_poll,
#if defined(NV_FILE_OPERATIONS_HAS_IOCTL)
    .ioctl          = nvidia_frontend_ioctl,
#endif
    .unlocked_ioctl = nvidia_frontend_unlocked_ioctl,
#if NVCPU_IS_X86_64 || NVCPU_IS_AARCH64
    .compat_ioctl   = nvidia_frontend_compat_ioctl,
#endif
    .mmap           = nvidia_frontend_mmap,
    .open           = nvidia_frontend_open,
    .release        = nvidia_frontend_close,
};

/* Helper functions */

static int add_device(nvidia_module_t *module, nv_linux_state_t *device, NvBool all)
{
    NvU32 i;
    int rc = -1;

    // Look for a free minor number and assign a unique minor number to this device
    for (i = 0; i <= NV_FRONTEND_CONTROL_DEVICE_MINOR_MIN; i++)
    {
        if (nv_minor_num_table[i] == NULL)
        {
            nv_minor_num_table[i] = module;
            device->minor_num = i;
            if (all == NV_TRUE)
            {
                device = device->next;
                if (device == NULL)
                {
                    rc = 0;
                    break;
                }
            }
            else
            {
                rc = 0;
                break;
            }
        }
    }
    return rc;
}

static int remove_device(nvidia_module_t *module, nv_linux_state_t *device)
{
    int rc = -1;

    // Remove this device from the minor number table
    if ((device != NULL) && (nv_minor_num_table[device->minor_num] != NULL))
    {
        nv_minor_num_table[device->minor_num] = NULL;
        device->minor_num = 0;
        rc = 0;
    }
    return rc;
}
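/*
 * Per-GPU devices are assigned minors from the low end of the table
 * (0 .. NV_FRONTEND_CONTROL_DEVICE_MINOR_MIN), while the control devices
 * registered below occupy the top NV_MAX_MODULE_INSTANCES minors counting
 * down from NV_FRONTEND_CONTROL_DEVICE_MINOR_MAX.
 */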

/* Export functions */

int nvidia_register_module(nvidia_module_t *module)
{
    int rc = 0;
    NvU32 ctrl_minor_num;

    down(&nv_module_table_lock);
    if (module->instance >= NV_MAX_MODULE_INSTANCES)
    {
        printk("NVRM: NVIDIA module instance %d registration failed.\n",
               module->instance);
        rc = -EINVAL;
        goto done;
    }

    ctrl_minor_num = NV_FRONTEND_CONTROL_DEVICE_MINOR_MAX - module->instance;
    nv_minor_num_table[ctrl_minor_num] = module;
    nv_num_instances++;
done:
    up(&nv_module_table_lock);

    return rc;
}
EXPORT_SYMBOL(nvidia_register_module);

int nvidia_unregister_module(nvidia_module_t *module)
{
    int rc = 0;
    NvU32 ctrl_minor_num;

    down(&nv_module_table_lock);

    ctrl_minor_num = NV_FRONTEND_CONTROL_DEVICE_MINOR_MAX - module->instance;
    if (nv_minor_num_table[ctrl_minor_num] == NULL)
    {
        printk("NVRM: NVIDIA module for %d instance does not exist\n",
               module->instance);
        rc = -1;
    }
    else
    {
        nv_minor_num_table[ctrl_minor_num] = NULL;
        nv_num_instances--;
    }

    up(&nv_module_table_lock);

    return rc;
}
EXPORT_SYMBOL(nvidia_unregister_module);

int nvidia_frontend_add_device(nvidia_module_t *module, nv_linux_state_t *device)
{
    int rc = -1;
    NvU32 ctrl_minor_num;

    down(&nv_module_table_lock);
    ctrl_minor_num = NV_FRONTEND_CONTROL_DEVICE_MINOR_MAX - module->instance;
    if (nv_minor_num_table[ctrl_minor_num] == NULL)
    {
        printk("NVRM: NVIDIA module for %d instance does not exist\n",
               module->instance);
        rc = -1;
    }
    else
    {
        rc = add_device(module, device, NV_FALSE);
    }
    up(&nv_module_table_lock);

    return rc;
}
EXPORT_SYMBOL(nvidia_frontend_add_device);

int nvidia_frontend_remove_device(nvidia_module_t *module, nv_linux_state_t *device)
{
    int rc = 0;
    NvU32 ctrl_minor_num;

    down(&nv_module_table_lock);
    ctrl_minor_num = NV_FRONTEND_CONTROL_DEVICE_MINOR_MAX - module->instance;
    if (nv_minor_num_table[ctrl_minor_num] == NULL)
    {
        printk("NVRM: NVIDIA module for %d instance does not exist\n",
               module->instance);
        rc = -1;
    }
    else
    {
        rc = remove_device(module, device);
    }
    up(&nv_module_table_lock);

    return rc;
}
EXPORT_SYMBOL(nvidia_frontend_remove_device);

int nvidia_frontend_open(
    struct inode *inode,
    struct file *file
)
{
    int rc = -ENODEV;
    nvidia_module_t *module = NULL;

    NvU32 minor_num = NV_FRONTEND_MINOR_NUMBER(inode);

    down(&nv_module_table_lock);
    module = nv_minor_num_table[minor_num];

    if ((module != NULL) && (module->open != NULL))
    {
        // Increment the reference count of the module to ensure that it does
        // not get unloaded while its corresponding device file is open; for
        // example, nvidiaN.ko should not get unloaded while /dev/nvidiaN is open.
        if (!try_module_get(module->owner))
        {
            up(&nv_module_table_lock);
            return -ENODEV;
        }
        rc = module->open(inode, file);
        if (rc < 0)
        {
            module_put(module->owner);
        }
    }

    up(&nv_module_table_lock);
    return rc;
}

int nvidia_frontend_close(
    struct inode *inode,
    struct file *file
)
{
    int rc = -ENODEV;
    nvidia_module_t *module = NULL;

    NvU32 minor_num = NV_FRONTEND_MINOR_NUMBER(inode);

    module = nv_minor_num_table[minor_num];

    if ((module != NULL) && (module->close != NULL))
    {
        rc = module->close(inode, file);

        // Decrement the reference count of the module.
        module_put(module->owner);
    }

    return rc;
}

unsigned int nvidia_frontend_poll(
    struct file *file,
    poll_table *wait
)
{
    unsigned int mask = 0;
    struct inode *inode = NV_FILE_INODE(file);
    NvU32 minor_num = NV_FRONTEND_MINOR_NUMBER(inode);
    nvidia_module_t *module = nv_minor_num_table[minor_num];

    if ((module != NULL) && (module->poll != NULL))
        mask = module->poll(file, wait);

    return mask;
}

int nvidia_frontend_ioctl(
    struct inode *inode,
    struct file *file,
    unsigned int cmd,
    unsigned long i_arg)
{
    int rc = -ENODEV;
    nvidia_module_t *module = NULL;

    NvU32 minor_num = NV_FRONTEND_MINOR_NUMBER(inode);
    module = nv_minor_num_table[minor_num];

    if ((module != NULL) && (module->ioctl != NULL))
        rc = module->ioctl(inode, file, cmd, i_arg);

    return rc;
}

long nvidia_frontend_unlocked_ioctl(
    struct file *file,
    unsigned int cmd,
    unsigned long i_arg
)
{
    return nvidia_frontend_ioctl(NV_FILE_INODE(file), file, cmd, i_arg);
}

long nvidia_frontend_compat_ioctl(
    struct file *file,
    unsigned int cmd,
    unsigned long i_arg
)
{
    return nvidia_frontend_ioctl(NV_FILE_INODE(file), file, cmd, i_arg);
}

int nvidia_frontend_mmap(
    struct file *file,
    struct vm_area_struct *vma
)
{
    int rc = -ENODEV;
    struct inode *inode = NV_FILE_INODE(file);
    NvU32 minor_num = NV_FRONTEND_MINOR_NUMBER(inode);
    nvidia_module_t *module = nv_minor_num_table[minor_num];

    if ((module != NULL) && (module->mmap != NULL))
        rc = module->mmap(file, vma);

    return rc;
}
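/*
 * Every file operation above follows the same dispatch pattern: recover the
 * minor number from the inode, look the owning module up in
 * nv_minor_num_table, and forward the call to that module's handler if one
 * is registered.
 */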

static int __init nvidia_frontend_init_module(void)
{
    int status = 0;

    // Initialize the nvidia module table
    nv_num_instances = 0;
    memset(nv_minor_num_table, 0, sizeof(nv_minor_num_table));
    NV_INIT_MUTEX(&nv_module_table_lock);

    status = nvidia_init_module();
    if (status < 0)
    {
        return status;
    }

    // Register the character device
    status = register_chrdev(NV_MAJOR_DEVICE_NUMBER, "nvidia-frontend", &nv_frontend_fops);
    if (status < 0)
    {
        printk("NVRM: register_chrdev() failed!\n");
        nvidia_exit_module();
    }

    return status;
}

static void __exit nvidia_frontend_exit_module(void)
{
    /*
     * If this is the last nvidia_module to be unregistered, clean up and
     * unregister the char dev
     */
    if (nv_num_instances == 1)
    {
        unregister_chrdev(NV_MAJOR_DEVICE_NUMBER, "nvidia-frontend");
    }

    nvidia_exit_module();
}

module_init(nvidia_frontend_init_module);
module_exit(nvidia_frontend_exit_module);

@@ -1,47 +0,0 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2012-2013 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef _NV_FRONTEND_H_
#define _NV_FRONTEND_H_

#include "nvtypes.h"
#include "nv-linux.h"
#include "nv-register-module.h"

#define NV_MAX_MODULE_INSTANCES 8

#define NV_FRONTEND_MINOR_NUMBER(x) minor((x)->i_rdev)

#define NV_FRONTEND_CONTROL_DEVICE_MINOR_MAX 255
#define NV_FRONTEND_CONTROL_DEVICE_MINOR_MIN (NV_FRONTEND_CONTROL_DEVICE_MINOR_MAX - \
                                              NV_MAX_MODULE_INSTANCES)

#define NV_FRONTEND_IS_CONTROL_DEVICE(x) ((x <= NV_FRONTEND_CONTROL_DEVICE_MINOR_MAX) && \
                                          (x > NV_FRONTEND_CONTROL_DEVICE_MINOR_MIN))
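/*
 * Worked example: with NV_MAX_MODULE_INSTANCES == 8, the control-device
 * minor range is (247, 255], i.e. minors 248..255 satisfy
 * NV_FRONTEND_IS_CONTROL_DEVICE(), and instance N's control device gets
 * minor 255 - N.
 */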

int nvidia_frontend_add_device(nvidia_module_t *, nv_linux_state_t *);
int nvidia_frontend_remove_device(nvidia_module_t *, nv_linux_state_t *);

extern nvidia_module_t *nv_minor_num_table[];

#endif
@@ -1,448 +0,0 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2017-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/*
 * nv-ibmnpu.c - interface with the ibmnpu (IBM NVLink Processing Unit) "module"
 */
#include "nv-linux.h"

#if defined(NVCPU_PPC64LE)
#include "nv-ibmnpu.h"
#include "nv-rsync.h"

/*
 * Temporary query to get the L1D cache block size directly from the device
 * tree for the offline cache flush workaround, since the ppc64_caches symbol
 * is unavailable to us.
 */
const NvU32 P9_L1D_CACHE_DEFAULT_BLOCK_SIZE = 0x80;

#if defined(NV_OF_GET_PROPERTY_PRESENT)
static NvU32 nv_ibm_get_cpu_l1d_cache_block_size(void)
{
    const __be32 *block_size_prop;

    /*
     * Attempt to look up the block size from the device tree. If unavailable,
     * just return the default that we see on these systems.
     */
    struct device_node *cpu = of_find_node_by_type(NULL, "cpu");
    if (!cpu)
    {
        return P9_L1D_CACHE_DEFAULT_BLOCK_SIZE;
    }

    block_size_prop = of_get_property(cpu, "d-cache-block-size", NULL);
    if (!block_size_prop)
    {
        return P9_L1D_CACHE_DEFAULT_BLOCK_SIZE;
    }

    return be32_to_cpu(*block_size_prop);
}
#else
static NvU32 nv_ibm_get_cpu_l1d_cache_block_size(void)
{
    return P9_L1D_CACHE_DEFAULT_BLOCK_SIZE;
}
#endif

/*
 * GPU device memory can be exposed to the kernel as NUMA node memory via the
 * IBMNPU devices associated with the GPU. The platform firmware will specify
 * the parameters of where the memory lives in the system address space via
 * firmware properties on the IBMNPU devices. These properties specify what
 * memory can be accessed through the IBMNPU device, and the driver can online
 * a GPU device's memory into the range accessible by its associated IBMNPU
 * devices.
 *
 * This function calls over to the IBMNPU driver to query the parameters from
 * firmware, and validates that the resulting parameters are acceptable.
 */
static void nv_init_ibmnpu_numa_info(nv_state_t *nv)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    nv_npu_numa_info_t *npu_numa_info = &nvl->npu->numa_info;
    struct pci_dev *npu_dev = nvl->npu->devs[0];
    NvU64 spa, gpa, aper_size;

    /*
     * Terminology:
     * - system physical address (spa): 47-bit NVIDIA physical address, which
     *   is the CPU real address with the NVLink address compression scheme
     *   already applied in firmware.
     * - guest physical address (gpa): 56-bit physical address as seen by the
     *   operating system. This is the base address that we should use for
     *   onlining device memory.
     */
    nvl->numa_info.node_id = ibmnpu_device_get_memory_config(npu_dev, &spa, &gpa,
                                                             &aper_size);
    if (nvl->numa_info.node_id == NUMA_NO_NODE)
    {
        NV_DEV_PRINTF(NV_DBG_SETUP, nv, "No NUMA memory aperture found\n");
        return;
    }

    /* Validate that the compressed system physical address is not too wide */
    if (spa & (~(BIT_ULL(nv_volta_dma_addr_size) - 1)))
    {
        NV_DEV_PRINTF(NV_DBG_ERRORS, nv,
            "Invalid NUMA memory system pa 0x%llx"
            " on IBM-NPU device %04x:%02x:%02x.%u\n",
            spa, NV_PCI_DOMAIN_NUMBER(npu_dev), NV_PCI_BUS_NUMBER(npu_dev),
            NV_PCI_SLOT_NUMBER(npu_dev), PCI_FUNC(npu_dev->devfn));
        goto invalid_numa_config;
    }

    /*
     * Validate that the guest physical address is aligned to 128GB.
     * This alignment requirement comes from the Volta address space
     * size on POWER9.
     */
    if (!IS_ALIGNED(gpa, BIT_ULL(nv_volta_addr_space_width)))
    {
        NV_DEV_PRINTF(NV_DBG_ERRORS, nv,
            "Invalid alignment in NUMA memory guest pa 0x%llx"
            " on IBM-NPU device %04x:%02x:%02x.%u\n",
            gpa, NV_PCI_DOMAIN_NUMBER(npu_dev), NV_PCI_BUS_NUMBER(npu_dev),
            NV_PCI_SLOT_NUMBER(npu_dev), PCI_FUNC(npu_dev->devfn));
        goto invalid_numa_config;
    }

    /* Validate that the aperture can map all of the device's framebuffer */
    if (aper_size < nv->fb->size)
    {
        NV_DEV_PRINTF(NV_DBG_ERRORS, nv,
            "Insufficient NUMA memory aperture size 0x%llx"
            " on IBM-NPU device %04x:%02x:%02x.%u (0x%llx required)\n",
            aper_size, NV_PCI_DOMAIN_NUMBER(npu_dev),
            NV_PCI_BUS_NUMBER(npu_dev), NV_PCI_SLOT_NUMBER(npu_dev),
            PCI_FUNC(npu_dev->devfn), nv->fb->size);
        goto invalid_numa_config;
    }

    npu_numa_info->compr_sys_phys_addr = spa;
    npu_numa_info->guest_phys_addr = gpa;

    if (NVreg_EnableUserNUMAManagement)
    {
        NV_ATOMIC_SET(nvl->numa_info.status, NV_IOCTL_NUMA_STATUS_OFFLINE);
    }
    else
    {
        NV_DEV_PRINTF(NV_DBG_SETUP, nv, "User-mode NUMA onlining disabled.\n");
        nvl->numa_info.node_id = NUMA_NO_NODE;
    }

    NV_DEV_PRINTF(NV_DBG_SETUP, nv, "NUMA memory aperture: "
        "[spa = 0x%llx, gpa = 0x%llx, aper_size = 0x%llx]\n",
        spa, gpa, aper_size);

    /* Get the CPU's L1D cache block size for the offlining cache flush */
    npu_numa_info->l1d_cache_block_size = nv_ibm_get_cpu_l1d_cache_block_size();

    return;

invalid_numa_config:
    NV_DEV_PRINTF(NV_DBG_ERRORS, nv,
        "NUMA memory aperture disabled due to invalid firmware configuration\n");
    nvl->numa_info.node_id = NUMA_NO_NODE;
}
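/*
 * Summary of the firmware-config validation above: the compressed system pa
 * must fit within nv_volta_dma_addr_size bits, the guest pa must be aligned
 * to the 2^nv_volta_addr_space_width (128GB) Volta address space, and the
 * aperture must be large enough to cover the whole framebuffer; any failure
 * disables the NUMA aperture rather than onlining partially usable memory.
 */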
|
||||
|
||||
void nv_init_ibmnpu_info(nv_state_t *nv)
|
||||
{
|
||||
#if defined(NV_PNV_PCI_GET_NPU_DEV_PRESENT)
|
||||
nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
|
||||
struct pci_dev *npu_dev = pnv_pci_get_npu_dev(nvl->pci_dev, 0);
|
||||
NvU8 dev_count;
|
||||
|
||||
if (!npu_dev)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
if (os_alloc_mem((void **)&nvl->npu, sizeof(nv_ibmnpu_info_t)) != NV_OK)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
os_mem_set(nvl->npu, 0, sizeof(nv_ibmnpu_info_t));
|
||||
|
||||
/* Find any other IBMNPU devices attached to this GPU */
|
||||
for (nvl->npu->devs[0] = npu_dev, dev_count = 1;
|
||||
dev_count < NV_MAX_ATTACHED_IBMNPUS; dev_count++)
|
||||
{
|
||||
nvl->npu->devs[dev_count] = pnv_pci_get_npu_dev(nvl->pci_dev, dev_count);
|
||||
if (!nvl->npu->devs[dev_count])
|
||||
{
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
nvl->npu->dev_count = dev_count;
|
||||
|
||||
/*
|
||||
* If we run out of space for IBMNPU devices, NV_MAX_ATTACHED_IBMNPUS will
|
||||
* need to be bumped.
|
||||
*/
|
||||
WARN_ON((dev_count == NV_MAX_ATTACHED_IBMNPUS) &&
|
||||
pnv_pci_get_npu_dev(nvl->pci_dev, dev_count));
|
||||
|
||||
ibmnpu_device_get_genregs_info(npu_dev, &nvl->npu->genregs);
|
||||
|
||||
if (nvl->npu->genregs.size > 0)
|
||||
{
|
||||
NV_DEV_PRINTF(NV_DBG_SETUP, nv,
|
||||
"IBM-NPU device %04x:%02x:%02x.%u associated with GPU "
|
||||
" has a generation register space 0x%llx-0x%llx\n",
|
||||
NV_PCI_DOMAIN_NUMBER(npu_dev), NV_PCI_BUS_NUMBER(npu_dev),
|
||||
NV_PCI_SLOT_NUMBER(npu_dev), PCI_FUNC(npu_dev->devfn),
|
||||
nvl->npu->genregs.start_addr,
|
||||
nvl->npu->genregs.start_addr + nvl->npu->genregs.size - 1);
|
||||
}
|
||||
else
|
||||
{
|
||||
NV_DEV_PRINTF(NV_DBG_SETUP, nv,
|
||||
"IBM-NPU device %04x:%02x:%02x.%u associated with GPU "
|
||||
"does not support generation registers\n",
|
||||
NV_PCI_DOMAIN_NUMBER(npu_dev), NV_PCI_BUS_NUMBER(npu_dev),
|
||||
NV_PCI_SLOT_NUMBER(npu_dev), PCI_FUNC(npu_dev->devfn));
|
||||
}
|
||||
|
||||
nv_init_ibmnpu_numa_info(nv);
|
||||
#endif
|
||||
}

void nv_destroy_ibmnpu_info(nv_state_t *nv)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);

    if (nvl->npu != NULL)
    {
        os_free_mem(nvl->npu);
        nvl->npu = NULL;
    }
}

int nv_init_ibmnpu_devices(nv_state_t *nv)
{
    NvU8 i;
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);

    if (!nvl->npu)
    {
        return 0;
    }

    for (i = 0; i < nvl->npu->dev_count; i++)
    {
        NV_DEV_PRINTF(NV_DBG_SETUP, nv,
            "Initializing IBM-NPU device %04x:%02x:%02x.%u\n",
            NV_PCI_DOMAIN_NUMBER(nvl->npu->devs[i]),
            NV_PCI_BUS_NUMBER(nvl->npu->devs[i]),
            NV_PCI_SLOT_NUMBER(nvl->npu->devs[i]),
            PCI_FUNC(nvl->npu->devs[i]->devfn));

        if (ibmnpu_init_device(nvl->npu->devs[i]) != NVL_SUCCESS)
        {
            nv_unregister_ibmnpu_devices(nv);
            return -EIO;
        }

        nvl->npu->initialized_dev_count++;
    }

    return 0;
}

void nv_unregister_ibmnpu_devices(nv_state_t *nv)
{
    NvU8 i;
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);

    if (!nvl->npu)
    {
        return;
    }

    for (i = 0; i < nvl->npu->initialized_dev_count; i++)
    {
        NV_DEV_PRINTF(NV_DBG_SETUP, nv,
            "Unregistering IBM-NPU device %04x:%02x:%02x.%u\n",
            NV_PCI_DOMAIN_NUMBER(nvl->npu->devs[i]),
            NV_PCI_BUS_NUMBER(nvl->npu->devs[i]),
            NV_PCI_SLOT_NUMBER(nvl->npu->devs[i]),
            PCI_FUNC(nvl->npu->devs[i]->devfn));

        ibmnpu_unregister_device(nvl->npu->devs[i]);
    }

    nvl->npu->initialized_dev_count = 0;
}

NV_STATUS NV_API_CALL nv_get_ibmnpu_genreg_info(nv_state_t *nv, NvU64 *addr,
                                                NvU64 *size, void **device)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);

    if (nvl->npu == NULL || nvl->npu->genregs.size == 0)
    {
        return NV_ERR_NOT_SUPPORTED;
    }

    if (addr)
    {
        *addr = nvl->npu->genregs.start_addr;
    }

    if (size)
    {
        *size = nvl->npu->genregs.size;
    }

    if (device)
    {
        *device = (void *)nvl->npu->devs[0];
    }

    return NV_OK;
}

NV_STATUS NV_API_CALL nv_get_ibmnpu_relaxed_ordering_mode(nv_state_t *nv,
                                                          NvBool *mode)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);

    if (nvl->npu == NULL || nvl->npu->genregs.size == 0)
    {
        return NV_ERR_NOT_SUPPORTED;
    }

    *mode = nv_get_rsync_relaxed_ordering_mode(nv);

    return NV_OK;
}

void NV_API_CALL nv_wait_for_ibmnpu_rsync(nv_state_t *nv)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);

    if (nvl->npu == NULL || nvl->npu->genregs.size == 0)
    {
        return;
    }

    nv_wait_for_rsync(nv);
}

int nv_get_ibmnpu_chip_id(nv_state_t *nv)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);

    if (nvl->npu == NULL)
    {
        return -1;
    }

    return ibmnpu_device_get_chip_id(nvl->npu->devs[0]);
}

void NV_API_CALL nv_ibmnpu_cache_flush_range(nv_state_t *nv, NvU64 cpu_virtual, NvU64 size)
{
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    NvU64 offset, cbsize;

    /*
     * The range is commonly an ioremap()ed mapping of the GPU's ATS range and
     * needs to be compared against the created mappings. Alternatively, kernel
     * page tables can be dumped through sysfs if CONFIG_PPC_PTDUMP is enabled.
     */
    NV_DEV_PRINTF(NV_DBG_INFO, nv,
        "Flushing CPU virtual range [0x%llx, 0x%llx)\n",
        cpu_virtual, cpu_virtual + size);

    cbsize = nvl->npu->numa_info.l1d_cache_block_size;

    CACHE_FLUSH();

    /* Force eviction of any cache lines from the NUMA-onlined region. */
    for (offset = 0; offset < size; offset += cbsize)
    {
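        /*
         * dcbf (Data Cache Block Flush) writes back and invalidates the
         * cache block containing the effective address cpu_virtual + offset.
         */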
        asm volatile("dcbf %0,%1" :: "r" (cpu_virtual), "r" (offset) : "memory");

        /* Reschedule if necessary to avoid lockup warnings */
        cond_resched();
    }

    CACHE_FLUSH();
}

#else

void nv_init_ibmnpu_info(nv_state_t *nv)
{
}

void nv_destroy_ibmnpu_info(nv_state_t *nv)
{
}

int nv_init_ibmnpu_devices(nv_state_t *nv)
{
    return 0;
}

void nv_unregister_ibmnpu_devices(nv_state_t *nv)
{
}

NV_STATUS NV_API_CALL nv_get_ibmnpu_genreg_info(nv_state_t *nv, NvU64 *addr,
                                                NvU64 *size, void **device)
{
    return NV_ERR_NOT_SUPPORTED;
}

NV_STATUS NV_API_CALL nv_get_ibmnpu_relaxed_ordering_mode(nv_state_t *nv,
                                                          NvBool *mode)
{
    return NV_ERR_NOT_SUPPORTED;
}

void NV_API_CALL nv_wait_for_ibmnpu_rsync(nv_state_t *nv)
{
}

int nv_get_ibmnpu_chip_id(nv_state_t *nv)
{
    return -1;
}

void NV_API_CALL nv_ibmnpu_cache_flush_range(nv_state_t *nv, NvU64 virtual, NvU64 size)
{
}

void nv_ibmnpu_cache_flush_numa_region(nv_state_t *nv)
{
}

#endif
@@ -1,80 +0,0 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef _NV_IBMNPU_H_
#define _NV_IBMNPU_H_

#if defined(NVCPU_PPC64LE)

#include "ibmnpu_linux.h"

#define NV_MAX_ATTACHED_IBMNPUS 6

typedef struct nv_npu_numa_info
{
    /*
     * 47-bit NVIDIA 'system physical address': the hypervisor real 56-bit
     * address with the NVLink address compression scheme applied.
     */
    NvU64 compr_sys_phys_addr;

    /*
     * 56-bit NVIDIA 'guest physical address'/host virtual address. On
     * unvirtualized systems, applying the NVLink address compression scheme
     * to this address should yield the same value as compr_sys_phys_addr.
     */
    NvU64 guest_phys_addr;

    /*
     * L1 data cache block size on P9 - needed to manually flush/invalidate
     * the NUMA region from the CPU caches after offlining.
     */
    NvU32 l1d_cache_block_size;
} nv_npu_numa_info_t;

struct nv_ibmnpu_info
{
    NvU8 dev_count;
    NvU8 initialized_dev_count;
    struct pci_dev *devs[NV_MAX_ATTACHED_IBMNPUS];
    ibmnpu_genregs_info_t genregs;
    nv_npu_numa_info_t numa_info;
};

/*
 * TODO: These parameters are specific to Volta/P9 configurations, and may
 * need to be determined dynamically in the future.
 */
static const NvU32 nv_volta_addr_space_width = 37;
static const NvU32 nv_volta_dma_addr_size = 47;

#endif

void nv_init_ibmnpu_info(nv_state_t *nv);
void nv_destroy_ibmnpu_info(nv_state_t *nv);
int nv_init_ibmnpu_devices(nv_state_t *nv);
void nv_unregister_ibmnpu_devices(nv_state_t *nv);
int nv_get_ibmnpu_chip_id(nv_state_t *nv);
void nv_ibmnpu_cache_flush_numa_region(nv_state_t *nv);

#endif
@@ -1,702 +0,0 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#define __NO_VERSION__

#include "os-interface.h"
#include "nv-linux.h"

#if defined(NV_SOC_TEGRA_TEGRA_BPMP_H_PRESENT) || IS_ENABLED(CONFIG_TEGRA_BPMP)
#include <soc/tegra/bpmp-abi.h>
#endif

#if IS_ENABLED(CONFIG_TEGRA_BPMP)
#include <soc/tegra/bpmp.h>
#elif defined(NV_SOC_TEGRA_TEGRA_BPMP_H_PRESENT)
#include <soc/tegra/tegra_bpmp.h>
#endif // IS_ENABLED(CONFIG_TEGRA_BPMP)

#if defined(NV_DT_BINDINGS_INTERCONNECT_TEGRA_ICC_ID_H_PRESENT)
#include <dt-bindings/interconnect/tegra_icc_id.h>
#endif

#ifdef NV_LINUX_PLATFORM_TEGRA_MC_UTILS_H_PRESENT
#include <linux/platform/tegra/mc_utils.h>
#endif

//
// IMP requires information from various BPMP and MC driver functions. The
// macro below checks that all of the required functions are present.
//
#define IMP_SUPPORT_FUNCTIONS_PRESENT \
    NV_IS_EXPORT_SYMBOL_PRESENT_dram_clk_to_mc_clk && \
    NV_IS_EXPORT_SYMBOL_PRESENT_get_dram_num_channels && \
    NV_IS_EXPORT_SYMBOL_PRESENT_tegra_dram_types && \
    (defined(NV_SOC_TEGRA_TEGRA_BPMP_H_PRESENT) || \
     IS_ENABLED(CONFIG_TEGRA_BPMP)) && \
    defined(NV_LINUX_PLATFORM_TEGRA_MC_UTILS_H_PRESENT)

//
// Also create a macro to check that all the required ICC symbols are present.
// DT endpoints are defined in dt-bindings/interconnect/tegra_icc_id.h.
//
#define ICC_SUPPORT_FUNCTIONS_PRESENT \
    defined(NV_DT_BINDINGS_INTERCONNECT_TEGRA_ICC_ID_H_PRESENT)
#if IMP_SUPPORT_FUNCTIONS_PRESENT
static struct mrq_emc_dvfs_latency_response latency_table;
static struct mrq_emc_dvfs_emchub_response emchub_table;
static struct cmd_iso_client_get_max_bw_response max_bw_table;

/*!
 * @brief Converts the MC driver dram type to RM format
 *
 * The MC driver's tegra_dram_types() function returns the dram type as an
 * enum. We convert it to an NvU32 for better ABI compatibility when stored in
 * the TEGRA_IMP_IMPORT_DATA structure, which is shared between various
 * software components.
 *
 * @param[in] dram_type Dram type (DRAM_TYPE_LPDDRxxx format).
 *
 * @returns dram type (TEGRA_IMP_IMPORT_DATA_DRAM_TYPE_LPDDRxxx format).
 */
static inline NvU32
nv_imp_convert_dram_type_to_rm_format
(
    enum dram_types dram_type
)
{
    NvU32 rm_dram_type;

    switch (dram_type)
    {
        case DRAM_TYPE_LPDDR4_16CH_ECC_1RANK:
        case DRAM_TYPE_LPDDR4_16CH_ECC_2RANK:
        case DRAM_TYPE_LPDDR4_8CH_ECC_1RANK:
        case DRAM_TYPE_LPDDR4_8CH_ECC_2RANK:
        case DRAM_TYPE_LPDDR4_4CH_ECC_1RANK:
        case DRAM_TYPE_LPDDR4_4CH_ECC_2RANK:
        case DRAM_TYPE_LPDDR4_16CH_1RANK:
        case DRAM_TYPE_LPDDR4_16CH_2RANK:
        case DRAM_TYPE_LPDDR4_8CH_1RANK:
        case DRAM_TYPE_LPDDR4_8CH_2RANK:
        case DRAM_TYPE_LPDDR4_4CH_1RANK:
        case DRAM_TYPE_LPDDR4_4CH_2RANK:
            rm_dram_type = TEGRA_IMP_IMPORT_DATA_DRAM_TYPE_LPDDR4;
            break;
        case DRAM_TYPE_LPDDR5_16CH_ECC_1RANK:
        case DRAM_TYPE_LPDDR5_16CH_ECC_2RANK:
        case DRAM_TYPE_LPDDR5_8CH_ECC_1RANK:
        case DRAM_TYPE_LPDDR5_8CH_ECC_2RANK:
        case DRAM_TYPE_LPDDR5_4CH_ECC_1RANK:
        case DRAM_TYPE_LPDDR5_4CH_ECC_2RANK:
        case DRAM_TYPE_LPDDR5_16CH_1RANK:
        case DRAM_TYPE_LPDDR5_16CH_2RANK:
        case DRAM_TYPE_LPDDR5_8CH_1RANK:
        case DRAM_TYPE_LPDDR5_8CH_2RANK:
        case DRAM_TYPE_LPDDR5_4CH_1RANK:
        case DRAM_TYPE_LPDDR5_4CH_2RANK:
            rm_dram_type = TEGRA_IMP_IMPORT_DATA_DRAM_TYPE_LPDDR5;
            break;
        default:
            rm_dram_type = TEGRA_IMP_IMPORT_DATA_DRAM_TYPE_UNKNOWN;
            break;
    }

    return rm_dram_type;
}
#endif // IMP_SUPPORT_FUNCTIONS_PRESENT

/*!
 * @brief Collects IMP-relevant BPMP data and saves it for later
 *
 * @param[in] nvl OS-specific device state
 *
 * @returns NV_OK if successful,
 *          NV_ERR_GENERIC if the BPMP API returns an error,
 *          NV_ERR_MISSING_TABLE_ENTRY if the latency table has no entries,
 *          NV_ERR_INVALID_DATA if the number of clock entries in the latency
 *          table does not match the number of entries in the emchub table, or
 *          NV_ERR_NOT_SUPPORTED if the functionality is not available.
 */
NV_STATUS
nv_imp_get_bpmp_data
(
    nv_linux_state_t *nvl
)
{
#if IMP_SUPPORT_FUNCTIONS_PRESENT
    NV_STATUS status = NV_OK;
    int rc;
    int i;
    NvBool bApiTableInvalid = NV_FALSE;
    static const struct iso_max_bw dummy_iso_bw_pairs[] =
    { {  204000U,  1472000U },
      {  533000U,  3520000U },
      {  665000U,  4352000U },
      {  800000U,  5184000U },
      { 1066000U,  6784000U },
      { 1375000U,  8704000U },
      { 1600000U, 10112000U },
      { 1866000U, 11712000U },
      { 2133000U, 13376000U },
      { 2400000U, 15040000U },
      { 2750000U, 17152000U },
      { 3000000U, 18688000U },
      { 3200000U, 20800000U }
    };
#if IS_ENABLED(CONFIG_TEGRA_BPMP)
    struct tegra_bpmp *bpmp;
    struct tegra_bpmp_message msg;
    struct mrq_iso_client_request iso_client_request;

    bpmp = tegra_bpmp_get(nvl->dev);
    if (IS_ERR(bpmp))
    {
        nv_printf(NV_DBG_ERRORS,
                  "NVRM: Error getting bpmp struct: %ld\n",
                  PTR_ERR(bpmp));
        return NV_ERR_GENERIC;
    }
    // Get the table of dramclk / DVFS latency pairs.
    memset(&msg, 0, sizeof(msg));
    msg.mrq = MRQ_EMC_DVFS_LATENCY;
    msg.tx.data = NULL;
    msg.tx.size = 0;
    msg.rx.data = &latency_table;
    msg.rx.size = sizeof(latency_table);

    rc = tegra_bpmp_transfer(bpmp, &msg);
#else
    // Get the table of dramclk / DVFS latency pairs.
    rc = tegra_bpmp_send_receive(MRQ_EMC_DVFS_LATENCY,
                                 NULL,
                                 0,
                                 &latency_table,
                                 sizeof(latency_table));
#endif
    if (rc != 0)
    {
        nv_printf(NV_DBG_ERRORS,
                  "MRQ_EMC_DVFS_LATENCY returns error code %d\n", rc);
        status = NV_ERR_GENERIC;
        goto Cleanup;
    }

    nv_printf(NV_DBG_INFO,
              "MRQ_EMC_DVFS_LATENCY table size = %u\n",
              latency_table.num_pairs);

    if (latency_table.num_pairs == 0U)
    {
        nv_printf(NV_DBG_ERRORS,
                  "MRQ_EMC_DVFS_LATENCY table has no entries\n");
        status = NV_ERR_MISSING_TABLE_ENTRY;
        goto Cleanup;
    }

    // Get the table of dramclk / emchubclk pairs.
#if IS_ENABLED(CONFIG_TEGRA_BPMP)
    memset(&msg, 0, sizeof(msg));
    msg.mrq = MRQ_EMC_DVFS_EMCHUB;
    msg.tx.data = NULL;
    msg.tx.size = 0;
    msg.rx.data = &emchub_table;
    msg.rx.size = sizeof(emchub_table);

    rc = tegra_bpmp_transfer(bpmp, &msg);
#else
    rc = tegra_bpmp_send_receive(MRQ_EMC_DVFS_EMCHUB,
                                 NULL,
                                 0,
                                 &emchub_table,
                                 sizeof(emchub_table));
#endif
    if (rc != 0)
    {
        nv_printf(NV_DBG_ERRORS,
                  "MRQ_EMC_DVFS_EMCHUB returns error code %d\n", rc);
        status = NV_ERR_GENERIC;
        goto Cleanup;
    }

    nv_printf(NV_DBG_INFO,
              "MRQ_EMC_DVFS_EMCHUB table size = %u\n",
              emchub_table.num_pairs);

    if (latency_table.num_pairs != emchub_table.num_pairs)
    {
        nv_printf(NV_DBG_ERRORS,
                  "MRQ_EMC_DVFS_LATENCY table size (%u) does not match MRQ_EMC_DVFS_EMCHUB table size (%u)\n",
                  latency_table.num_pairs,
                  emchub_table.num_pairs);
        status = NV_ERR_INVALID_DATA;
        goto Cleanup;
    }

    // Get the table of dramclk / max ISO BW pairs.
#if IS_ENABLED(CONFIG_TEGRA_BPMP)
    memset(&iso_client_request, 0, sizeof(iso_client_request));
    iso_client_request.cmd = CMD_ISO_CLIENT_GET_MAX_BW;
    iso_client_request.max_isobw_req.id = TEGRA_ICC_DISPLAY;
    msg.mrq = MRQ_ISO_CLIENT;
    msg.tx.data = &iso_client_request;
    msg.tx.size = sizeof(iso_client_request);
    msg.rx.data = &max_bw_table;
    msg.rx.size = sizeof(max_bw_table);

    rc = tegra_bpmp_transfer(bpmp, &msg);
#else
    // The old non-CONFIG_TEGRA_BPMP implementation may no longer be needed here.
    NV_ASSERT(NV_FALSE);
#endif
    if ((rc != 0) || (max_bw_table.num_pairs == 0U))
    {
        if (rc != 0)
        {
            nv_printf(NV_DBG_ERRORS,
                      "MRQ_ISO_CLIENT returns error code %d\n", rc);
        }
        else
        {
            nv_printf(NV_DBG_ERRORS,
                      "CMD_ISO_CLIENT_GET_MAX_BW table does not contain any entries\n");
        }
        bApiTableInvalid = NV_TRUE;
    }
    else
    {
        //
        // Check for entries with ISO BW = 0. It's possible that one entry may
        // be zero, but they should not all be zero. (On simulation, due to bug
        // 3379796, the API is currently not working; it returns 13 entries,
        // each with ISO BW = 0.)
        //
        bApiTableInvalid = NV_TRUE;
        for (i = 0; i < max_bw_table.num_pairs; i++)
        {
            if (max_bw_table.pairs[i].iso_bw != 0U)
            {
                bApiTableInvalid = NV_FALSE;
                break;
            }
        }
    }
    if (bApiTableInvalid)
    {
        //
        // If the table is not returned correctly, for now, fill in a dummy
        // table.
        //
        nv_printf(NV_DBG_ERRORS,
                  "Creating dummy CMD_ISO_CLIENT_GET_MAX_BW table\n");
        max_bw_table.num_pairs = sizeof(dummy_iso_bw_pairs) /
                                 sizeof(dummy_iso_bw_pairs[0]);
        for (i = 0; i < max_bw_table.num_pairs; i++)
        {
            max_bw_table.pairs[i].freq = dummy_iso_bw_pairs[i].freq;
            max_bw_table.pairs[i].iso_bw = dummy_iso_bw_pairs[i].iso_bw;
        }
    }
    nv_printf(NV_DBG_INFO,
              "CMD_ISO_CLIENT_GET_MAX_BW table size = %u\n",
              max_bw_table.num_pairs);

Cleanup:
#if IS_ENABLED(CONFIG_TEGRA_BPMP)
    tegra_bpmp_put(bpmp);
#endif
    return status;
#else // IMP_SUPPORT_FUNCTIONS_PRESENT
    return NV_ERR_NOT_SUPPORTED;
#endif
}
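
/*
 * Illustrative sketch only (not part of the original file): the three MRQ
 * queries above repeat the same fill-and-transfer pattern. Assuming the
 * CONFIG_TEGRA_BPMP tegra_bpmp_transfer() API, a helper along these lines
 * could consolidate them; the name nv_imp_bpmp_query is hypothetical.
 */
#if 0
static int nv_imp_bpmp_query(struct tegra_bpmp *bpmp, unsigned int mrq,
                             const void *req, size_t req_size,
                             void *resp, size_t resp_size)
{
    struct tegra_bpmp_message msg;

    memset(&msg, 0, sizeof(msg));
    msg.mrq = mrq;
    msg.tx.data = req;      /* NULL for request-less queries */
    msg.tx.size = req_size;
    msg.rx.data = resp;
    msg.rx.size = resp_size;

    /* Returns 0 on success, or a negative error code from BPMP. */
    return tegra_bpmp_transfer(bpmp, &msg);
}
#endif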

/*!
 * @brief Returns IMP-relevant data collected from other modules
 *
 * @param[out] tegra_imp_import_data Structure to receive the data
 *
 * @returns NV_OK if successful,
 *          NV_ERR_BUFFER_TOO_SMALL if the array in TEGRA_IMP_IMPORT_DATA is
 *          too small,
 *          NV_ERR_INVALID_DATA if the latency table's dramclk frequencies do
 *          not match the emchub table's, or
 *          NV_ERR_NOT_SUPPORTED if the functionality is not available.
 */
NV_STATUS NV_API_CALL
nv_imp_get_import_data
(
    TEGRA_IMP_IMPORT_DATA *tegra_imp_import_data
)
{
#if IMP_SUPPORT_FUNCTIONS_PRESENT
    NvU32 i;
    NvU32 bwTableIndex = 0U;
    NvU32 dram_clk_freq_khz;
    enum dram_types dram_type;

    tegra_imp_import_data->num_dram_clk_entries = latency_table.num_pairs;
    if (ARRAY_SIZE(tegra_imp_import_data->dram_clk_instance) <
        latency_table.num_pairs)
    {
        nv_printf(NV_DBG_ERRORS,
                  "ERROR: TEGRA_IMP_IMPORT_DATA struct needs to have at least "
                  "%d dram_clk_instance entries, but only %d are allocated\n",
                  latency_table.num_pairs,
                  ARRAY_SIZE(tegra_imp_import_data->dram_clk_instance));
        return NV_ERR_BUFFER_TOO_SMALL;
    }

    //
    // Copy data that we collected earlier in the BPMP tables into the caller's
    // IMP import structure.
    //
    for (i = 0U; i < latency_table.num_pairs; i++)
    {
        dram_clk_freq_khz = latency_table.pairs[i].freq;
        //
        // For each dramclk frequency, we get some information from the EMCHUB
        // table and some information from the LATENCY table. We expect both
        // tables to have entries for the same dramclk frequencies.
        //
        if (dram_clk_freq_khz != emchub_table.pairs[i].freq)
        {
            nv_printf(NV_DBG_ERRORS,
                      "MRQ_EMC_DVFS_LATENCY index #%d dramclk freq (%d KHz) does not match "
                      "MRQ_EMC_DVFS_EMCHUB index #%d dramclk freq (%d KHz)\n",
                      i, latency_table.pairs[i].freq,
                      i, emchub_table.pairs[i].freq);
            return NV_ERR_INVALID_DATA;
        }

        // Copy a few values to the caller's table.
        tegra_imp_import_data->dram_clk_instance[i].dram_clk_freq_khz =
            dram_clk_freq_khz;
        tegra_imp_import_data->dram_clk_instance[i].switch_latency_ns =
            latency_table.pairs[i].latency;
        tegra_imp_import_data->dram_clk_instance[i].mc_clk_khz =
            dram_clk_to_mc_clk(dram_clk_freq_khz / 1000U) * 1000U;

        // MC hubclk is 1/2 of scf clk, which is the same as EMCHUB clk.
        tegra_imp_import_data->dram_clk_instance[i].mchub_clk_khz =
            emchub_table.pairs[i].hub_freq / 2U;

        //
        // The ISO BW table may have more entries than the number of dramclk
        // frequencies supported on the current chip (i.e., more entries than
        // we have in the EMCHUB and LATENCY tables). For each dramclk entry
        // that we are filling out, search through the ISO BW table to find
        // the largest dramclk less than or equal to the dramclk frequency for
        // index "i", and use that ISO BW entry. (We assume all tables have
        // their entries in order of increasing dramclk frequency.)
        //
        // Note: Some of the dramclk frequencies in the ISO BW table have been
        // observed to be "rounded down" (e.g., 665000 KHz instead of 665600
        // KHz).
        //
        while ((bwTableIndex + 1U < max_bw_table.num_pairs) &&
               (dram_clk_freq_khz >= max_bw_table.pairs[bwTableIndex + 1U].freq))
        {
            nv_printf(NV_DBG_INFO,
                      "Max ISO BW table: index %u, dramclk = %u KHz, max ISO BW = %u KB/sec\n",
                      bwTableIndex,
                      max_bw_table.pairs[bwTableIndex].freq,
                      max_bw_table.pairs[bwTableIndex].iso_bw);
            bwTableIndex++;
        }
        if (dram_clk_freq_khz >= max_bw_table.pairs[bwTableIndex].freq)
        {
            nv_printf(NV_DBG_INFO,
                      "For dramclk = %u KHz, setting max ISO BW = %u KB/sec\n",
                      dram_clk_freq_khz,
                      max_bw_table.pairs[bwTableIndex].iso_bw);
            tegra_imp_import_data->dram_clk_instance[i].max_iso_bw_kbps =
                max_bw_table.pairs[bwTableIndex].iso_bw;
        }
        else
        {
            //
            // Something went wrong. Maybe the ISO BW table doesn't have any
            // entries with a dramclk frequency as small as the frequency in
            // the EMCHUB and LATENCY tables, or maybe the entries are out of
            // order.
            //
            nv_printf(NV_DBG_ERRORS,
                      "Couldn't get max ISO BW for dramclk = %u KHz\n",
                      dram_clk_freq_khz);
            return NV_ERR_INVALID_DATA;
        }
    }

    dram_type = tegra_dram_types();

    tegra_imp_import_data->dram_type =
        nv_imp_convert_dram_type_to_rm_format(dram_type);

    tegra_imp_import_data->num_dram_channels = get_dram_num_channels();

    // Record the overall maximum possible ISO BW.
    i = latency_table.num_pairs - 1U;
    tegra_imp_import_data->max_iso_bw_kbps =
        tegra_imp_import_data->dram_clk_instance[i].max_iso_bw_kbps;

    return NV_OK;
#else // IMP_SUPPORT_FUNCTIONS_PRESENT
    return NV_ERR_NOT_SUPPORTED;
#endif
}
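
/*
 * Illustrative note (not part of the original file): with an ISO BW table
 * holding dramclk entries { 533000, 665000, 800000 } KHz (values taken from
 * the dummy table above) and a LATENCY/EMCHUB dramclk of 665600 KHz, the
 * search above stops at the 665000 KHz entry - the largest table dramclk
 * <= 665600 - which also absorbs the "rounded down" frequencies noted in the
 * comment.
 */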

/*!
 * @brief Tells BPMP whether or not RFL is valid
 *
 * Display HW generates an ok_to_switch signal which asserts when mempool
 * occupancy is high enough to be able to turn off memory long enough to
 * execute a dramclk frequency switch without underflowing display output.
 * ok_to_switch drives the RFL ("request for latency") signal in the memory
 * unit, and the switch sequencer waits for this signal to go active before
 * starting a dramclk switch. However, if the signal is not valid (e.g., if
 * display HW or SW has not been initialized yet), the switch sequencer
 * ignores the signal. This API tells BPMP whether or not the signal is valid.
 *
 * @param[in] nv      Per GPU Linux state
 * @param[in] bEnable True if RFL will be valid; false if invalid
 *
 * @returns NV_OK if successful,
 *          NV_ERR_NOT_SUPPORTED if the functionality is not available, or
 *          NV_ERR_GENERIC if some other kind of error occurred.
 */
NV_STATUS NV_API_CALL
nv_imp_enable_disable_rfl
(
    nv_state_t *nv,
    NvBool bEnable
)
{
    NV_STATUS status = NV_ERR_NOT_SUPPORTED;
#if IMP_SUPPORT_FUNCTIONS_PRESENT
#if IS_ENABLED(CONFIG_TEGRA_BPMP)
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    struct tegra_bpmp *bpmp = tegra_bpmp_get(nvl->dev);
    struct tegra_bpmp_message msg;
    struct mrq_emc_disp_rfl_request emc_disp_rfl_request;
    int rc;

    memset(&emc_disp_rfl_request, 0, sizeof(emc_disp_rfl_request));
    emc_disp_rfl_request.mode = bEnable ? EMC_DISP_RFL_MODE_ENABLED :
                                          EMC_DISP_RFL_MODE_DISABLED;
    msg.mrq = MRQ_EMC_DISP_RFL;
    msg.tx.data = &emc_disp_rfl_request;
    msg.tx.size = sizeof(emc_disp_rfl_request);
    msg.rx.data = NULL;
    msg.rx.size = 0;

    rc = tegra_bpmp_transfer(bpmp, &msg);
    if (rc == 0)
    {
        nv_printf(NV_DBG_INFO,
                  "\"Wait for RFL\" is %s via MRQ_EMC_DISP_RFL\n",
                  bEnable ? "enabled" : "disabled");
        status = NV_OK;
    }
    else
    {
        nv_printf(NV_DBG_ERRORS,
                  "MRQ_EMC_DISP_RFL failed to %s \"Wait for RFL\" (error code = %d)\n",
                  bEnable ? "enable" : "disable",
                  rc);
        status = NV_ERR_GENERIC;
    }
#else
    // The old non-CONFIG_TEGRA_BPMP implementation may no longer be needed here.
    NV_ASSERT(NV_FALSE);
#endif
#endif
    return status;
}

/*!
 * @brief Obtains a handle for the display data path
 *
 * If a handle is obtained successfully, it is not returned to the caller; it
 * is saved for later use by subsequent nv_imp_icc_set_bw calls.
 * nv_imp_icc_get must be called prior to calling nv_imp_icc_set_bw.
 *
 * @param[in] nv Per GPU Linux state
 *
 * @returns NV_OK if successful,
 *          NV_ERR_NOT_SUPPORTED if the functionality is not available, or
 *          NV_ERR_GENERIC if some other error occurred.
 */
NV_STATUS NV_API_CALL
nv_imp_icc_get
(
    nv_state_t *nv
)
{
#if ICC_SUPPORT_FUNCTIONS_PRESENT
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    NV_STATUS status = NV_OK;

#if defined(NV_ICC_GET_PRESENT)
    struct device_node *np;
    nvl->nv_imp_icc_path = NULL;
    // Check if ICC is present in the device tree, and enabled.
    np = of_find_node_by_path("/icc");
    if (np != NULL)
    {
        if (of_device_is_available(np))
        {
            // Get the ICC data path.
            nvl->nv_imp_icc_path =
                icc_get(nvl->dev, TEGRA_ICC_DISPLAY, TEGRA_ICC_PRIMARY);
        }
        of_node_put(np);
    }
#else
    nv_printf(NV_DBG_ERRORS, "NVRM: icc_get() not present\n");
    return NV_ERR_NOT_SUPPORTED;
#endif

    if (nvl->nv_imp_icc_path == NULL)
    {
        nv_printf(NV_DBG_INFO, "NVRM: icc_get disabled\n");
        status = NV_ERR_NOT_SUPPORTED;
    }
    else if (IS_ERR(nvl->nv_imp_icc_path))
    {
        nv_printf(NV_DBG_ERRORS, "NVRM: invalid path = %ld\n",
                  PTR_ERR(nvl->nv_imp_icc_path));
        nvl->nv_imp_icc_path = NULL;
        status = NV_ERR_GENERIC;
    }
    return status;
#else
    return NV_ERR_NOT_SUPPORTED;
#endif
}

/*!
 * @brief Releases the handle obtained by nv_imp_icc_get
 *
 * @param[in] nv Per GPU Linux state
 */
void
nv_imp_icc_put
(
    nv_state_t *nv
)
{
#if ICC_SUPPORT_FUNCTIONS_PRESENT
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
#if defined(NV_ICC_PUT_PRESENT)
    if (nvl->nv_imp_icc_path != NULL)
    {
        icc_put(nvl->nv_imp_icc_path);
    }
#else
    nv_printf(NV_DBG_ERRORS, "icc_put() not present\n");
#endif
    nvl->nv_imp_icc_path = NULL;
#endif
}

/*!
 * @brief Allocates a specified amount of ISO memory bandwidth for display
 *
 * floor_bw_kbps is the minimum required (i.e., floor) dramclk frequency
 * multiplied by the width of the pipe over which the display data will
 * travel. (It is understood that the bandwidth calculated by multiplying the
 * clock frequency by the pipe width will not be realistically achievable, due
 * to overhead in the memory subsystem. ICC will not actually use the
 * bandwidth value, except to reverse the calculation to get the required
 * dramclk frequency.)
 *
 * nv_imp_icc_get must be called prior to calling this function.
 *
 * @param[in] nv            Per GPU Linux state
 * @param[in] avg_bw_kbps   Amount of ISO memory bandwidth requested
 * @param[in] floor_bw_kbps Min required dramclk freq * pipe width
 *
 * @returns NV_OK if successful,
 *          NV_ERR_INSUFFICIENT_RESOURCES if one of the bandwidth values is
 *          too high, and bandwidth cannot be allocated,
 *          NV_ERR_NOT_SUPPORTED if the functionality is not available, or
 *          NV_ERR_GENERIC if some other kind of error occurred.
 */
NV_STATUS NV_API_CALL
nv_imp_icc_set_bw
(
    nv_state_t *nv,
    NvU32 avg_bw_kbps,
    NvU32 floor_bw_kbps
)
{
#if ICC_SUPPORT_FUNCTIONS_PRESENT
    nv_linux_state_t *nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    int rc;
    NV_STATUS status = NV_OK;

    //
    // avg_bw_kbps can be either an ISO bw request or an NISO bw request.
    // Use floor_bw_kbps to make floor requests.
    //
#if defined(NV_ICC_SET_BW_PRESENT)
    //
    // nv_imp_icc_path will be NULL on AV + L systems because ICC is disabled.
    // In this case, skip the allocation call, and just return a success
    // status.
    //
    if (nvl->nv_imp_icc_path == NULL)
    {
        return NV_OK;
    }
    rc = icc_set_bw(nvl->nv_imp_icc_path, avg_bw_kbps, floor_bw_kbps);
#else
    nv_printf(NV_DBG_ERRORS, "icc_set_bw() not present\n");
    return NV_ERR_NOT_SUPPORTED;
#endif

    if (rc < 0)
    {
        // A negative return value indicates an error.
        if (rc == -ENOMEM)
        {
            status = NV_ERR_INSUFFICIENT_RESOURCES;
        }
        else
        {
            status = NV_ERR_GENERIC;
        }
    }
    return status;
#else
    return NV_ERR_NOT_SUPPORTED;
#endif
}
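
/*
 * Illustrative sketch only (not part of the original file): the expected
 * calling order for the ICC helpers above, assuming a valid nv_state_t *nv.
 * The bandwidth figures are placeholder values, and the function name
 * nv_imp_icc_usage_example is hypothetical.
 */
#if 0
static void nv_imp_icc_usage_example(nv_state_t *nv)
{
    // Obtain and cache the display ICC path first.
    if (nv_imp_icc_get(nv) != NV_OK)
    {
        return;
    }

    // Request 1 GB/s average and a 2 GB/s floor (both in KB/s).
    (void)nv_imp_icc_set_bw(nv, 1000000U, 2000000U);

    // Release the path when display is torn down.
    nv_imp_icc_put(nv);
}
#endif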
@@ -1,736 +0,0 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 1999-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "os-interface.h"
#include "nv.h"
#include "nv-linux.h"

static inline void nv_set_contig_memory_uc(nvidia_pte_t *page_ptr, NvU32 num_pages)
{
#if defined(NV_SET_MEMORY_UC_PRESENT)
    struct page *page = NV_GET_PAGE_STRUCT(page_ptr->phys_addr);
    unsigned long addr = (unsigned long)page_address(page);
    set_memory_uc(addr, num_pages);
#elif defined(NV_SET_PAGES_UC_PRESENT)
    struct page *page = NV_GET_PAGE_STRUCT(page_ptr->phys_addr);
    set_pages_uc(page, num_pages);
#endif
}

static inline void nv_set_contig_memory_wb(nvidia_pte_t *page_ptr, NvU32 num_pages)
{
#if defined(NV_SET_MEMORY_UC_PRESENT)
    struct page *page = NV_GET_PAGE_STRUCT(page_ptr->phys_addr);
    unsigned long addr = (unsigned long)page_address(page);
    set_memory_wb(addr, num_pages);
#elif defined(NV_SET_PAGES_UC_PRESENT)
    struct page *page = NV_GET_PAGE_STRUCT(page_ptr->phys_addr);
    set_pages_wb(page, num_pages);
#endif
}

static inline int nv_set_memory_array_type_present(NvU32 type)
{
    switch (type)
    {
#if defined(NV_SET_MEMORY_ARRAY_UC_PRESENT)
        case NV_MEMORY_UNCACHED:
            return 1;
        case NV_MEMORY_WRITEBACK:
            return 1;
#endif
        default:
            return 0;
    }
}

static inline int nv_set_pages_array_type_present(NvU32 type)
{
    switch (type)
    {
#if defined(NV_SET_PAGES_ARRAY_UC_PRESENT)
        case NV_MEMORY_UNCACHED:
            return 1;
        case NV_MEMORY_WRITEBACK:
            return 1;
#endif
        default:
            return 0;
    }
}

static inline void nv_set_memory_array_type(
    unsigned long *pages,
    NvU32 num_pages,
    NvU32 type
)
{
    switch (type)
    {
#if defined(NV_SET_MEMORY_ARRAY_UC_PRESENT)
        case NV_MEMORY_UNCACHED:
            set_memory_array_uc(pages, num_pages);
            break;
        case NV_MEMORY_WRITEBACK:
            set_memory_array_wb(pages, num_pages);
            break;
#endif
        default:
            nv_printf(NV_DBG_ERRORS,
                      "NVRM: %s(): type %d unimplemented\n",
                      __FUNCTION__, type);
            break;
    }
}

static inline void nv_set_pages_array_type(
    struct page **pages,
    NvU32 num_pages,
    NvU32 type
)
{
    switch (type)
    {
#if defined(NV_SET_PAGES_ARRAY_UC_PRESENT)
        case NV_MEMORY_UNCACHED:
            set_pages_array_uc(pages, num_pages);
            break;
        case NV_MEMORY_WRITEBACK:
            set_pages_array_wb(pages, num_pages);
            break;
#endif
        default:
            nv_printf(NV_DBG_ERRORS,
                      "NVRM: %s(): type %d unimplemented\n",
                      __FUNCTION__, type);
            break;
    }
}

static inline void nv_set_contig_memory_type(
    nvidia_pte_t *page_ptr,
    NvU32 num_pages,
    NvU32 type
)
{
    switch (type)
    {
        case NV_MEMORY_UNCACHED:
            nv_set_contig_memory_uc(page_ptr, num_pages);
            break;
        case NV_MEMORY_WRITEBACK:
            nv_set_contig_memory_wb(page_ptr, num_pages);
            break;
        default:
            nv_printf(NV_DBG_ERRORS,
                      "NVRM: %s(): type %d unimplemented\n",
                      __FUNCTION__, type);
    }
}

static inline void nv_set_memory_type(nv_alloc_t *at, NvU32 type)
{
    NvU32 i;
    NV_STATUS status = NV_OK;
#if defined(NV_SET_MEMORY_ARRAY_UC_PRESENT)
    unsigned long *pages = NULL;
#elif defined(NV_SET_PAGES_ARRAY_UC_PRESENT)
    struct page **pages = NULL;
#else
    unsigned long *pages = NULL;
#endif
    nvidia_pte_t *page_ptr;
    struct page *page;

    if (nv_set_memory_array_type_present(type))
    {
        status = os_alloc_mem((void **)&pages,
                              at->num_pages * sizeof(unsigned long));
    }
    else if (nv_set_pages_array_type_present(type))
    {
        status = os_alloc_mem((void **)&pages,
                              at->num_pages * sizeof(struct page *));
    }

    if (status != NV_OK)
        pages = NULL;

    //
    // If the set_{memory,page}_array_* functions are in the kernel interface,
    // it's faster to use them since they work on non-contiguous memory,
    // whereas the set_{memory,page}_* functions do not.
    //
    if (pages)
    {
        for (i = 0; i < at->num_pages; i++)
        {
            page_ptr = at->page_table[i];
            page = NV_GET_PAGE_STRUCT(page_ptr->phys_addr);
#if defined(NV_SET_MEMORY_ARRAY_UC_PRESENT)
            pages[i] = (unsigned long)page_address(page);
#elif defined(NV_SET_PAGES_ARRAY_UC_PRESENT)
            pages[i] = page;
#endif
        }
#if defined(NV_SET_MEMORY_ARRAY_UC_PRESENT)
        nv_set_memory_array_type(pages, at->num_pages, type);
#elif defined(NV_SET_PAGES_ARRAY_UC_PRESENT)
        nv_set_pages_array_type(pages, at->num_pages, type);
#endif
        os_free_mem(pages);
    }

    //
    // If the set_{memory,page}_array_* functions aren't present in the kernel
    // interface, each page has to be set individually, which has been measured
    // to be ~10x slower than using the set_{memory,page}_array_* functions.
    //
    else
    {
        for (i = 0; i < at->num_pages; i++)
            nv_set_contig_memory_type(at->page_table[i], 1, type);
    }
}
static NvU64 nv_get_max_sysmem_address(void)
{
    NvU64 global_max_pfn = 0ULL;
    int node_id;

    for_each_online_node(node_id)
    {
        global_max_pfn = max(global_max_pfn, (NvU64)node_end_pfn(node_id));
    }

    return ((global_max_pfn + 1) << PAGE_SHIFT) - 1;
}
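
/*
 * Illustrative note (not part of the original file): if the loop above yields
 * global_max_pfn == 0xFFFFF with 4 KB pages (PAGE_SHIFT == 12), the function
 * returns ((0xFFFFF + 1) << 12) - 1 = 0xFFFFFFFF, i.e., the last byte of the
 * highest addressable sysmem page.
 */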

static unsigned int nv_compute_gfp_mask(
    nv_state_t *nv,
    nv_alloc_t *at
)
{
    unsigned int gfp_mask = NV_GFP_KERNEL;
    struct device *dev = at->dev;

    /*
     * If we know that SWIOTLB is enabled (and therefore we avoid calling the
     * kernel to DMA-remap the pages), or if we are using dma_direct (which may
     * transparently use the SWIOTLB for pages that are unaddressable by the
     * device, in kernel versions 5.0 and later), limit our allocation pool
     * to the first 4GB to avoid allocating pages outside of our device's
     * addressable limit.
     * Also, limit the allocation to the first 4GB if explicitly requested by
     * setting the "nv->force_dma32_alloc" variable.
     */
    if (!nv || !nv_requires_dma_remap(nv) || nv_is_dma_direct(dev) || nv->force_dma32_alloc)
    {
        NvU64 max_sysmem_address = nv_get_max_sysmem_address();
        if ((dev && dev->dma_mask && (*(dev->dma_mask) < max_sysmem_address)) ||
            (nv && nv->force_dma32_alloc))
        {
            gfp_mask = NV_GFP_DMA32;
        }
    }
#if defined(__GFP_RETRY_MAYFAIL)
    gfp_mask |= __GFP_RETRY_MAYFAIL;
#elif defined(__GFP_NORETRY)
    gfp_mask |= __GFP_NORETRY;
#endif
#if defined(__GFP_ZERO)
    if (at->flags.zeroed)
        gfp_mask |= __GFP_ZERO;
#endif
#if defined(__GFP_THISNODE)
    if (at->flags.node0)
        gfp_mask |= __GFP_THISNODE;
#endif
    // Compound pages are required by vm_insert_page for high-order page
    // allocations
    if (at->order > 0)
        gfp_mask |= __GFP_COMP;

    return gfp_mask;
}

/*
 * This function is needed for allocating contiguous physical memory in xen
 * dom0. Because of the use of the xen sw iotlb in xen dom0, memory allocated
 * by NV_GET_FREE_PAGES may not be machine-contiguous when the size is more
 * than 1 page. nv_alloc_coherent_pages() will give us machine-contiguous
 * memory. Even though we get dma_address directly in this function, we will
 * still call pci_map_page() later to get the dma address. This is fine, as it
 * will return the same machine address.
 */
static NV_STATUS nv_alloc_coherent_pages(
    nv_state_t *nv,
    nv_alloc_t *at
)
{
    nvidia_pte_t *page_ptr;
    NvU32 i;
    unsigned int gfp_mask;
    unsigned long virt_addr = 0;
    dma_addr_t bus_addr;
    nv_linux_state_t *nvl;
    struct device *dev;

    if (!nv)
    {
        nv_printf(NV_DBG_MEMINFO,
            "NVRM: VM: %s: coherent page alloc on nvidiactl not supported\n", __FUNCTION__);
        return NV_ERR_NOT_SUPPORTED;
    }

    nvl = NV_GET_NVL_FROM_NV_STATE(nv);
    dev = nvl->dev;

    gfp_mask = nv_compute_gfp_mask(nv, at);

    virt_addr = (unsigned long)dma_alloc_coherent(dev,
                                                  at->num_pages * PAGE_SIZE,
                                                  &bus_addr,
                                                  gfp_mask);
    if (!virt_addr)
    {
        nv_printf(NV_DBG_MEMINFO,
            "NVRM: VM: %s: failed to allocate memory\n", __FUNCTION__);
        return NV_ERR_NO_MEMORY;
    }

    for (i = 0; i < at->num_pages; i++)
    {
        page_ptr = at->page_table[i];

        page_ptr->virt_addr = virt_addr + i * PAGE_SIZE;
        page_ptr->phys_addr = virt_to_phys((void *)page_ptr->virt_addr);
        page_ptr->dma_addr = bus_addr + i * PAGE_SIZE;
    }

    if (at->cache_type != NV_MEMORY_CACHED)
    {
        nv_set_contig_memory_type(at->page_table[0],
                                  at->num_pages,
                                  NV_MEMORY_UNCACHED);
    }

    at->flags.coherent = NV_TRUE;
    return NV_OK;
}

static void nv_free_coherent_pages(
    nv_alloc_t *at
)
{
    nvidia_pte_t *page_ptr;
    struct device *dev = at->dev;

    page_ptr = at->page_table[0];

    if (at->cache_type != NV_MEMORY_CACHED)
    {
        nv_set_contig_memory_type(at->page_table[0],
                                  at->num_pages,
                                  NV_MEMORY_WRITEBACK);
    }

    dma_free_coherent(dev, at->num_pages * PAGE_SIZE,
                      (void *)page_ptr->virt_addr, page_ptr->dma_addr);
}

NV_STATUS nv_alloc_contig_pages(
    nv_state_t *nv,
    nv_alloc_t *at
)
{
    NV_STATUS status;
    nvidia_pte_t *page_ptr;
    NvU32 i, j;
    unsigned int gfp_mask;
    unsigned long virt_addr = 0;
    NvU64 phys_addr;
    struct device *dev = at->dev;

    nv_printf(NV_DBG_MEMINFO,
        "NVRM: VM: %s: %u pages\n", __FUNCTION__, at->num_pages);

    // TODO: This is a temporary WAR, and will be removed after fixing bug 200732409.
    if (os_is_xen_dom0() || at->flags.unencrypted)
        return nv_alloc_coherent_pages(nv, at);

    if (!NV_SOC_IS_ISO_IOMMU_PRESENT(nv))
    {
        return nv_alloc_coherent_pages(nv, at);
    }

    at->order = get_order(at->num_pages * PAGE_SIZE);
    gfp_mask = nv_compute_gfp_mask(nv, at);

    if (at->flags.node0)
    {
        NV_ALLOC_PAGES_NODE(virt_addr, 0, at->order, gfp_mask);
    }
    else
    {
        NV_GET_FREE_PAGES(virt_addr, at->order, gfp_mask);
    }
    if (virt_addr == 0)
    {
        if (os_is_vgx_hyper())
        {
            nv_printf(NV_DBG_MEMINFO,
                "NVRM: VM: %s: failed to allocate memory, trying coherent memory\n", __FUNCTION__);

            status = nv_alloc_coherent_pages(nv, at);
            return status;
        }

        nv_printf(NV_DBG_MEMINFO,
            "NVRM: VM: %s: failed to allocate memory\n", __FUNCTION__);
        return NV_ERR_NO_MEMORY;
    }
#if !defined(__GFP_ZERO)
    if (at->flags.zeroed)
        memset((void *)virt_addr, 0, (at->num_pages * PAGE_SIZE));
#endif

    for (i = 0; i < at->num_pages; i++, virt_addr += PAGE_SIZE)
    {
        phys_addr = nv_get_kern_phys_address(virt_addr);
        if (phys_addr == 0)
        {
            nv_printf(NV_DBG_ERRORS,
                "NVRM: VM: %s: failed to look up physical address\n",
                __FUNCTION__);
            status = NV_ERR_OPERATING_SYSTEM;
            goto failed;
        }

        page_ptr = at->page_table[i];
        page_ptr->phys_addr = phys_addr;
        page_ptr->page_count = NV_GET_PAGE_COUNT(page_ptr);
        page_ptr->virt_addr = virt_addr;
        page_ptr->dma_addr = nv_phys_to_dma(dev, page_ptr->phys_addr);

        NV_MAYBE_RESERVE_PAGE(page_ptr);
    }

    if (at->cache_type != NV_MEMORY_CACHED)
    {
        nv_set_contig_memory_type(at->page_table[0],
                                  at->num_pages,
                                  NV_MEMORY_UNCACHED);
    }

    at->flags.coherent = NV_FALSE;

    return NV_OK;

failed:
    if (i > 0)
    {
        for (j = 0; j < i; j++)
            NV_MAYBE_UNRESERVE_PAGE(at->page_table[j]);
    }

    page_ptr = at->page_table[0];
    NV_FREE_PAGES(page_ptr->virt_addr, at->order);

    return status;
}

void nv_free_contig_pages(
    nv_alloc_t *at
)
{
    nvidia_pte_t *page_ptr;
    unsigned int i;

    nv_printf(NV_DBG_MEMINFO,
        "NVRM: VM: %s: %u pages\n", __FUNCTION__, at->num_pages);

    if (at->flags.coherent)
        return nv_free_coherent_pages(at);

    if (at->cache_type != NV_MEMORY_CACHED)
    {
        nv_set_contig_memory_type(at->page_table[0],
                                  at->num_pages,
                                  NV_MEMORY_WRITEBACK);
    }

    for (i = 0; i < at->num_pages; i++)
    {
        page_ptr = at->page_table[i];

        if (NV_GET_PAGE_COUNT(page_ptr) != page_ptr->page_count)
        {
            static int count = 0;
            if (count++ < NV_MAX_RECURRING_WARNING_MESSAGES)
            {
                nv_printf(NV_DBG_ERRORS,
                    "NVRM: VM: %s: page count != initial page count (%u,%u)\n",
                    __FUNCTION__, NV_GET_PAGE_COUNT(page_ptr),
                    page_ptr->page_count);
            }
        }
        NV_MAYBE_UNRESERVE_PAGE(page_ptr);
    }

    page_ptr = at->page_table[0];

    NV_FREE_PAGES(page_ptr->virt_addr, at->order);
}

NV_STATUS nv_alloc_system_pages(
    nv_state_t *nv,
    nv_alloc_t *at
)
{
    NV_STATUS status;
    nvidia_pte_t *page_ptr;
    NvU32 i, j;
    unsigned int gfp_mask;
    unsigned long virt_addr = 0;
    NvU64 phys_addr;
    struct device *dev = at->dev;
    dma_addr_t bus_addr;

    nv_printf(NV_DBG_MEMINFO,
        "NVRM: VM: %s: %u pages\n", __FUNCTION__, at->num_pages);

    gfp_mask = nv_compute_gfp_mask(nv, at);

    for (i = 0; i < at->num_pages; i++)
    {
        if (at->flags.unencrypted && (dev != NULL))
        {
            virt_addr = (unsigned long)dma_alloc_coherent(dev,
                                                          PAGE_SIZE,
                                                          &bus_addr,
                                                          gfp_mask);
            at->flags.coherent = NV_TRUE;
        }
        else if (at->flags.node0)
        {
            NV_ALLOC_PAGES_NODE(virt_addr, 0, 0, gfp_mask);
        }
        else
        {
            NV_GET_FREE_PAGES(virt_addr, 0, gfp_mask);
        }

        if (virt_addr == 0)
        {
            nv_printf(NV_DBG_MEMINFO,
                "NVRM: VM: %s: failed to allocate memory\n", __FUNCTION__);
            status = NV_ERR_NO_MEMORY;
            goto failed;
        }
#if !defined(__GFP_ZERO)
        if (at->flags.zeroed)
            memset((void *)virt_addr, 0, PAGE_SIZE);
#endif

        phys_addr = nv_get_kern_phys_address(virt_addr);
        if (phys_addr == 0)
        {
            nv_printf(NV_DBG_ERRORS,
                "NVRM: VM: %s: failed to look up physical address\n",
                __FUNCTION__);
            NV_FREE_PAGES(virt_addr, 0);
            status = NV_ERR_OPERATING_SYSTEM;
            goto failed;
        }

#if defined(_PAGE_NX)
        if (((_PAGE_NX & pgprot_val(PAGE_KERNEL)) != 0) &&
            (phys_addr < 0x400000))
        {
            nv_printf(NV_DBG_SETUP,
                "NVRM: VM: %s: discarding page @ 0x%llx\n",
                __FUNCTION__, phys_addr);
            --i;
            continue;
        }
#endif

        page_ptr = at->page_table[i];
        page_ptr->phys_addr = phys_addr;
        page_ptr->page_count = NV_GET_PAGE_COUNT(page_ptr);
        page_ptr->virt_addr = virt_addr;

        //
        // Use the unencrypted dma_addr returned by dma_alloc_coherent(), as
        // nv_phys_to_dma() returns an encrypted dma_addr when AMD SEV is
        // enabled.
        //
        if (at->flags.coherent)
            page_ptr->dma_addr = bus_addr;
        else if (dev)
            page_ptr->dma_addr = nv_phys_to_dma(dev, page_ptr->phys_addr);
        else
            page_ptr->dma_addr = page_ptr->phys_addr;

        NV_MAYBE_RESERVE_PAGE(page_ptr);
    }

    if (at->cache_type != NV_MEMORY_CACHED)
        nv_set_memory_type(at, NV_MEMORY_UNCACHED);

    return NV_OK;

failed:
    if (i > 0)
    {
        for (j = 0; j < i; j++)
        {
            page_ptr = at->page_table[j];
            NV_MAYBE_UNRESERVE_PAGE(page_ptr);
            if (at->flags.coherent)
            {
                dma_free_coherent(dev, PAGE_SIZE, (void *)page_ptr->virt_addr,
                                  page_ptr->dma_addr);
            }
            else
            {
                NV_FREE_PAGES(page_ptr->virt_addr, 0);
            }
        }
    }

    return status;
}

void nv_free_system_pages(
    nv_alloc_t *at
)
{
    nvidia_pte_t *page_ptr;
    unsigned int i;
    struct device *dev = at->dev;

    nv_printf(NV_DBG_MEMINFO,
        "NVRM: VM: %s: %u pages\n", __FUNCTION__, at->num_pages);

    if (at->cache_type != NV_MEMORY_CACHED)
        nv_set_memory_type(at, NV_MEMORY_WRITEBACK);

    for (i = 0; i < at->num_pages; i++)
    {
        page_ptr = at->page_table[i];

        if (NV_GET_PAGE_COUNT(page_ptr) != page_ptr->page_count)
        {
            static int count = 0;
            if (count++ < NV_MAX_RECURRING_WARNING_MESSAGES)
            {
                nv_printf(NV_DBG_ERRORS,
                    "NVRM: VM: %s: page count != initial page count (%u,%u)\n",
                    __FUNCTION__, NV_GET_PAGE_COUNT(page_ptr),
                    page_ptr->page_count);
            }
        }

        NV_MAYBE_UNRESERVE_PAGE(page_ptr);
        if (at->flags.coherent)
        {
            dma_free_coherent(dev, PAGE_SIZE, (void *)page_ptr->virt_addr,
                              page_ptr->dma_addr);
        }
        else
        {
            NV_FREE_PAGES(page_ptr->virt_addr, 0);
        }
    }
}

NvUPtr nv_vm_map_pages(
    struct page **pages,
    NvU32 count,
    NvBool cached,
    NvBool unencrypted
)
{
    NvUPtr virt_addr = 0;

    if (!NV_MAY_SLEEP())
    {
        nv_printf(NV_DBG_ERRORS,
            "NVRM: %s: can't map %d pages, invalid context!\n",
            __FUNCTION__, count);
        os_dbg_breakpoint();
        return virt_addr;
    }

    virt_addr = nv_vmap(pages, count, cached, unencrypted);
    return virt_addr;
}

void nv_vm_unmap_pages(
    NvUPtr virt_addr,
    NvU32 count
)
{
    if (!NV_MAY_SLEEP())
    {
        nv_printf(NV_DBG_ERRORS,
            "NVRM: %s: can't unmap %d pages at 0x%0llx, "
            "invalid context!\n", __FUNCTION__, count, virt_addr);
        os_dbg_breakpoint();
        return;
    }

    nv_vunmap(virt_addr, count);
}

void nv_address_space_init_once(struct address_space *mapping)
{
#if defined(NV_ADDRESS_SPACE_INIT_ONCE_PRESENT)
    address_space_init_once(mapping);
#else
    memset(mapping, 0, sizeof(*mapping));
    INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);

#if defined(NV_ADDRESS_SPACE_HAS_RWLOCK_TREE_LOCK)
    //
    // The .tree_lock member variable was changed from type rwlock_t, to
    // spinlock_t, on 25 July 2008, by mainline commit
    // 19fd6231279be3c3bdd02ed99f9b0eb195978064.
    //
    rwlock_init(&mapping->tree_lock);
#else
    spin_lock_init(&mapping->tree_lock);
#endif

    spin_lock_init(&mapping->i_mmap_lock);
    INIT_LIST_HEAD(&mapping->private_list);
    spin_lock_init(&mapping->private_lock);
    INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap);
    INIT_LIST_HEAD(&mapping->i_mmap_nonlinear);
#endif /* !NV_ADDRESS_SPACE_INIT_ONCE_PRESENT */
}
@@ -1,535 +0,0 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/******************************* DisplayPort *******************************\
*                                                                           *
* Module: dp_configcaps.h                                                   *
*         Abstraction for basic caps registers                              *
*                                                                           *
\***************************************************************************/

#ifndef INCLUDED_DP_CONFIGCAPS_H
#define INCLUDED_DP_CONFIGCAPS_H

#include "dp_connector.h"
#include "dp_auxretry.h"
#include "dp_linkconfig.h"
#include "dp_regkeydatabase.h"

namespace DisplayPort
{
    enum PowerState
    {
        PowerStateD0 = 1,
        PowerStateD3 = 2,
        PowerStateD3AuxOn = 5
    };

    // Extended caps = offset 0x80
    enum DwnStreamPortType
    {
        DISPLAY_PORT = 0,
        ANALOG_VGA,
        DVI,
        HDMI,
        WITHOUT_EDID,
        DISPLAY_PORT_PLUSPLUS
    };

    enum DwnStreamPortAttribute
    {
        RESERVED = 0,
        IL_720_480_60HZ,
        IL_720_480_50HZ,
        IL_1920_1080_60HZ,
        IL_1920_1080_50HZ,
        PG_1280_720_60HZ,
        PG_1280_720_50_HZ,
    };

    // DPCD Offset 102 enums
    enum TrainingPatternSelectType
    {
        TRAINING_DISABLED,
        TRAINING_PAT_ONE,
        TRAINING_PAT_TWO,
        TRAINING_PAT_THREE,
    };

    enum SymbolErrorSelectType
    {
        DISPARITY_ILLEGAL_SYMBOL_ERROR,
        DISPARITY_ERROR,
        ILLEGAL_SYMBOL_ERROR,
    };

    // DPCD Offset 1A1 enums
    enum MultistreamHotplugMode
    {
        HPD_LONG_PULSE,
        IRQ_HPD,
    };

    // DPCD Offset 220
    enum TestPatternType
    {
        NO_PATTERN,
        COLOR_RAMPS,
        BLACK_WHITE,
        COLOR_SQUARE,
    };

    // DPCD Offset 232, 233
    enum ColorFormatType
    {
        RGB,
        YCbCr_422,
        YCbCr_444,
    };

    enum DynamicRangeType
    {
        VESA,
        CEA,
    };

    enum YCBCRCoeffType
    {
        ITU601,
        ITU709,
    };

    #define HDCP_BCAPS_SIZE              (0x1)
    #define HDCP_VPRIME_SIZE             (0x14)
    #define HDCP_KSV_FIFO_SIZE           (0xF)
    #define HDCP_KSV_FIFO_WINDOWS_RETRY  (0x3)
    #define HDCP22_BCAPS_SIZE            (0x1)

    // Bstatus DPCD offset 0x68029
    #define HDCPREADY                  (0x1)
    #define R0PRIME_AVAILABLE          (0x2)
    #define LINK_INTEGRITY_FAILURE     (0x4)
    #define REAUTHENTICATION_REQUEST   (0x8)

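    //
    // Usage sketch (editorial addition, not in the original header): the
    // Bstatus byte read from DPCD offset 0x68029 is tested against the bit
    // masks above. The variable name "bStatus" is illustrative.
    //
    //   NvU8 bStatus = /* AUX read of DPCD offset 0x68029 */;
    //   if (bStatus & LINK_INTEGRITY_FAILURE)
    //       ;   // link lost HDCP integrity; re-authenticate
    //   if (bStatus & REAUTHENTICATION_REQUEST)
    //       ;   // sink asked for re-authentication
    //
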
    struct BInfo
    {
        bool maxCascadeExceeded;
        unsigned depth;
        bool maxDevsExceeded;
        unsigned deviceCount;
    };

    struct BCaps
    {
        bool repeater;
        bool HDCPCapable;
    };

    enum
    {
        PHYSICAL_PORT_START = 0x0,
        PHYSICAL_PORT_END   = 0x7,
        LOGICAL_PORT_START  = 0x8,
        LOGICAL_PORT_END    = 0xF
    };

    class LaneStatus
    {
    public:
        //
        // Lane Status
        //    CAUTION: Only updated on IRQ/HPD right now
        //
        virtual bool getLaneStatusClockRecoveryDone(int lane) = 0;  // DPCD offset 202, 203
        virtual bool getLaneStatusSymbolLock(int lane) = 0;
        virtual bool getInterlaneAlignDone() = 0;
        virtual bool getDownStreamPortStatusChange() = 0;
    };

    class TestRequest
    {
    public:
        virtual bool getPendingTestRequestTraining() = 0;       // DPCD offset 218
        virtual void getTestRequestTraining(LinkRate & rate, unsigned & lanes) = 0; // DPCD offset 219, 220
        virtual bool getPendingAutomatedTestRequest() = 0;      // DPCD offset 218
        virtual bool getPendingTestRequestEdidRead() = 0;       // DPCD offset 218
        virtual bool getPendingTestRequestPhyCompliance() = 0;  // DPCD offset 218
        virtual LinkQualityPatternType getPhyTestPattern() = 0; // DPCD offset 248
        virtual AuxRetry::status setTestResponse(bool ack, bool edidChecksumWrite = false) = 0;
        virtual AuxRetry::status setTestResponseChecksum(NvU8 checksum) = 0;
    };

    class LegacyPort
    {
    public:
        virtual DwnStreamPortType getDownstreamPortType() = 0;
        virtual DwnStreamPortAttribute getDownstreamNonEDIDPortAttribute() = 0;

        // For port type = HDMI
        virtual NvU64 getMaxTmdsClkRate() = 0;
    };

    class LinkState
    {
    public:
        //
        // Link state
        //
        virtual bool isPostLtAdjustRequestSupported() = 0;
        virtual void setPostLtAdjustRequestGranted(bool bGrantPostLtRequest) = 0;
        virtual bool getIsPostLtAdjRequestInProgress() = 0;               // DPCD offset 204
        virtual TrainingPatternSelectType getTrainingPatternSelect() = 0; // DPCD offset 102

        virtual bool setTrainingMultiLaneSet(NvU8 numLanes,
                                             NvU8 *voltSwingSet,
                                             NvU8 *preEmphasisSet) = 0;

        virtual bool readTraining(NvU8* voltageSwingLane,
                                  NvU8* preemphasisLane = 0,
                                  NvU8* trainingScoreLane = 0,
                                  NvU8* postCursor = 0,
                                  NvU8 activeLaneCount = 0) = 0;

        virtual bool isLaneSettingsChanged(NvU8* oldVoltageSwingLane,
                                           NvU8* newVoltageSwingLane,
                                           NvU8* oldPreemphasisLane,
                                           NvU8* newPreemphasisLane,
                                           NvU8 activeLaneCount) = 0;

        virtual AuxRetry::status setIgnoreMSATimingParamters(bool msaTimingParamIgnoreEn) = 0;
        virtual AuxRetry::status setLinkQualLaneSet(unsigned lane, LinkQualityPatternType linkQualPattern) = 0;
        virtual AuxRetry::status setLinkQualPatternSet(LinkQualityPatternType linkQualPattern, unsigned laneCount = 0) = 0;
    };

    class LinkCapabilities
    {
    public:
        //
        // Physical layer feature set
        //
        virtual NvU64 getMaxLinkRate() = 0;                     // Maximum byte-block in Hz
        virtual unsigned getMaxLaneCount() = 0;                 // DPCD offset 2
        virtual unsigned getMaxLaneCountSupportedAtLinkRate(LinkRate linkRate) = 0;
        virtual bool getEnhancedFraming() = 0;
        virtual bool getSupportsNoHandshakeTraining() = 0;
        virtual bool getMsaTimingparIgnored() = 0;
        virtual bool getDownstreamPort(NvU8 *portType) = 0;     // DPCD offset 5
        virtual bool getSupportsMultistream() = 0;              // DPCD offset 21h
        virtual bool getNoLinkTraining() = 0;                   // DPCD offset 330h
        virtual unsigned getPhyRepeaterCount() = 0;             // DPCD offset F0002h
    };

    class OUI
    {
    public:
        virtual bool getOuiSupported() = 0;
        virtual AuxRetry::status setOuiSource(unsigned ouiId, const char * model, size_t modelNameLength, NvU8 chipRevision) = 0;
        virtual bool getOuiSink(unsigned &ouiId, char * modelName, size_t modelNameBufferSize, NvU8 & chipRevision) = 0;
    };

    class HDCP
    {
    public:
        virtual bool getBKSV(NvU8 *bKSV) = 0;                              // DPCD offset 0x68000
        virtual bool getBCaps(BCaps &bCaps, NvU8 * rawByte = 0) = 0;       // DPCD offset 0x68028
        virtual bool getHdcp22BCaps(BCaps &bCaps, NvU8 * rawByte = 0) = 0; // DPCD offset 0x6921D
        virtual bool getBinfo(BInfo &bInfo) = 0;                           // DPCD offset 0x6802A

        // Generic interfaces for HDCP 1.x / 2.2
        virtual bool getRxStatus(const HDCPState &hdcpState, NvU8 *data) = 0;
    };

    class DPCDHAL :
        virtual public Object,
        public TestRequest,
        public LaneStatus,
        public LinkState,
        public LinkCapabilities,
        public OUI,
        public HDCP
    {
    public:
        //
        // Notifications of external events
        //      We send IRQ/HPD events to the HAL so that it knows
        //      when to re-read the registers. All the remaining
        //      calls are either accessors to cached state (caps),
        //      or DPCD get/setters.
        //
        virtual void notifyIRQ() = 0;
        virtual void notifyHPD(bool status, bool bSkipDPCDRead = false) = 0;

        virtual void populateFakeDpcd() = 0;

        // DPCD override routines
        virtual void overrideMaxLinkRate(NvU32 overrideMaxLinkRate) = 0;
        virtual void overrideMaxLaneCount(NvU32 maxLaneCount) = 0;
        virtual void skipCableBWCheck(NvU32 maxLaneAtHighRate, NvU32 maxLaneAtLowRate) = 0;
        virtual void overrideOptimalLinkCfg(LinkRate optimalLinkRate, NvU32 optimalLaneCount) = 0;
        virtual void overrideOptimalLinkRate(LinkRate optimalLinkRate) = 0;

        virtual bool isDpcdOffline() = 0;
        virtual void setAuxBus(AuxBus * bus) = 0;
        virtual NvU32 getVideoFallbackSupported() = 0;
        //
        // Cached CAPS
        //      These are only re-read when notifyHPD is called
        //
        virtual unsigned getRevisionMajor() = 0;
        virtual unsigned getRevisionMinor() = 0;

        virtual unsigned lttprGetRevisionMajor() = 0;
        virtual unsigned lttprGetRevisionMinor() = 0;

        virtual bool getSDPExtnForColorimetry() = 0;

        bool isAtLeastVersion(unsigned major, unsigned minor)
        {
            if (getRevisionMajor() > major)
                return true;

            if (getRevisionMajor() < major)
                return false;

            return getRevisionMinor() >= minor;
        }

        bool isVersion(unsigned major, unsigned minor)
        {
            if ((getRevisionMajor() == major) &&
                (getRevisionMinor() == minor))
                return true;

            return false;
        }

        bool lttprIsAtLeastVersion(unsigned major, unsigned minor)
        {
            if (lttprGetRevisionMajor() > major)
                return true;

            if (lttprGetRevisionMajor() < major)
                return false;

            return lttprGetRevisionMinor() >= minor;
        }

        bool lttprIsVersion(unsigned major, unsigned minor)
        {
            if ((lttprGetRevisionMajor() == major) &&
                (lttprGetRevisionMinor() == minor))
                return true;

            return false;
        }

        // Convert Link Bandwidth read from DPCD register to Linkrate
        NvU64 mapLinkBandiwdthToLinkrate(NvU32 linkBandwidth)
        {
            if (FLD_TEST_DRF(_DPCD, _MAX_LINK_BANDWIDTH, _VAL, _1_62_GBPS, linkBandwidth))
                return RBR;
            else if (FLD_TEST_DRF(_DPCD, _MAX_LINK_BANDWIDTH, _VAL, _2_70_GBPS, linkBandwidth))
                return HBR;
            else if (FLD_TEST_DRF(_DPCD, _MAX_LINK_BANDWIDTH, _VAL, _5_40_GBPS, linkBandwidth))
                return HBR2;
            else if (FLD_TEST_DRF(_DPCD14, _MAX_LINK_BANDWIDTH, _VAL, _8_10_GBPS, linkBandwidth))
                return HBR3;
            else
            {
                DP_ASSERT(0 && "Unknown link bandwidth. Assuming HBR");
                return HBR;
            }
        }

        //
        // Native aux transaction size (16 for AUX)
        //
        virtual size_t getTransactionSize() = 0;

        //
        // SST Branching device/dongle/repeater
        //   - Describes downstream port limitations
        //   - Not for use with MST
        //   - Primarily used for dongles (look at port 0 for pclk limits)
        //
        virtual LegacyPort * getLegacyPort(unsigned index) = 0;
        virtual unsigned getLegacyPortCount() = 0;

        virtual PCONCaps * getPCONCaps() = 0;

        //
        // Single stream specific caps
        //
        virtual unsigned getNumberOfAudioEndpoints() = 0;
        virtual int getSinkCount() = 0;
        virtual void setSinkCount(int sinkCount) = 0;

        //
        // MISC
        //
        virtual bool isPC2Disabled() = 0;
        virtual void setPC2Disabled(bool disabled) = 0;

        virtual void setDPCDOffline(bool enable) = 0;
        virtual void updateDPCDOffline() = 0;

        virtual void setSupportsESI(bool bIsESISupported) = 0;
        virtual void setLttprSupported(bool isLttprSupported) = 0;

        //
        // Intermediate Link Rate (eDP ILR)
        //
        virtual void setIndexedLinkrateEnabled(bool newVal) = 0;
        virtual bool isIndexedLinkrateEnabled() = 0;
        virtual bool isIndexedLinkrateCapable() = 0;
        virtual NvU16 *getLinkRateTable() = 0;
        virtual bool getRawLinkRateTable(NvU8 *buffer = NULL) = 0;

        //
        // Link power state management
        //
        virtual bool setPowerState(PowerState newState) = 0;
        virtual PowerState getPowerState() = 0;

        //
        // Multistream
        //
        virtual bool getGUID(GUID & guid) = 0;                  // DPCD offset 30
        virtual AuxRetry::status setGUID(GUID & guid) = 0;
        virtual AuxRetry::status setMessagingEnable(bool uprequestEnable, bool upstreamIsSource) = 0;
        virtual AuxRetry::status setMultistreamLink(bool bMultistream) = 0;
        virtual void payloadTableClearACT() = 0;
        virtual bool payloadWaitForACTReceived() = 0;
        virtual bool payloadAllocate(unsigned streamId, unsigned begin, unsigned count) = 0;
        virtual bool clearPendingMsg() = 0;
        virtual bool isMessagingEnabled() = 0;

        //
        // If set to IRQ we'll receive CSN messages on hotplugs (which are actually easy to miss).
        // If set to HPD mode we'll always receive an HPD whenever the topology changes.
        // The library supports using both modes.
        //
        virtual AuxRetry::status setMultistreamHotplugMode(MultistreamHotplugMode notifyType) = 0;

        //
        // Interrupts
        //
        virtual bool interruptContentProtection() = 0;
        virtual void clearInterruptContentProtection() = 0;

        virtual bool intteruptMCCS() = 0;
        virtual void clearInterruptMCCS() = 0;

        virtual bool interruptDownReplyReady() = 0;
        virtual void clearInterruptDownReplyReady() = 0;

        virtual bool interruptUpRequestReady() = 0;
        virtual void clearInterruptUpRequestReady() = 0;

        virtual bool interruptCapabilitiesChanged() = 0;
        virtual void clearInterruptCapabilitiesChanged() = 0;

        virtual bool getLinkStatusChanged() = 0;
        virtual void clearLinkStatusChanged() = 0;

        virtual bool getHdmiLinkStatusChanged() = 0;
        virtual void clearHdmiLinkStatusChanged() = 0;

        virtual bool getStreamStatusChanged() = 0;
        virtual void clearStreamStatusChanged() = 0;

        virtual void setDirtyLinkStatus(bool dirty) = 0;
        virtual void refreshLinkStatus() = 0;
        virtual bool isLinkStatusValid(unsigned lanes) = 0;

        virtual void getCustomTestPattern(NvU8 *testPattern) = 0;   // DPCD offset 250 - 259

        //
        // Message Boxes
        //
        virtual AuxRetry::status writeDownRequestMessageBox(NvU8 * data, size_t length) = 0;
        virtual size_t getDownRequestMessageBoxSize() = 0;

        virtual AuxRetry::status writeUpReplyMessageBox(NvU8 * data, size_t length) = 0;
        virtual size_t getUpReplyMessageBoxSize() = 0;

        virtual AuxRetry::status readDownReplyMessageBox(NvU32 offset, NvU8 * data, size_t length) = 0;
        virtual size_t getDownReplyMessageBoxSize() = 0;

        virtual AuxRetry::status readUpRequestMessageBox(NvU32 offset, NvU8 * data, size_t length) = 0;
        virtual size_t getUpRequestMessageBoxSize() = 0;

        // MST<->SST override
        virtual void overrideMultiStreamCap(bool mstCapable) = 0;
        virtual bool getMultiStreamCapOverride() = 0;

        virtual bool getDpcdMultiStreamCap(void) = 0;

        // Set GPU DP support capability
        virtual void setGpuDPSupportedVersions(bool supportDp1_2, bool supportDp1_4) = 0;

        // Set GPU FEC support capability
        virtual void setGpuFECSupported(bool bSupportFEC) = 0;

        virtual void applyRegkeyOverrides(const DP_REGKEY_DATABASE& dpRegkeyDatabase) = 0;

        // PCON configuration

        // Reset PCON (to default state)
        virtual void resetProtocolConverter() = 0;
        // Source control mode and FRL/HDMI mode selection.
        virtual bool setSourceControlMode(bool bEnableSourceControlMode, bool bEnableFRLMode) = 0;

        virtual bool checkPCONFrlReady(bool *bFrlReady) = 0;
        virtual bool setupPCONFrlLinkAssessment(NvU32 linkBw,
                                                bool bEnableExtendLTMode = false,
                                                bool bEnableConcurrentMode = false) = 0;

        virtual bool checkPCONFrlLinkStatus(NvU32 *frlRate) = 0;

        virtual bool queryHdmiLinkStatus(bool *bLinkActive, bool *bLinkReady) = 0;
        virtual NvU32 restorePCONFrlLink(NvU32 linkBwMask,
                                         bool bEnableExtendLTMode = false,
                                         bool bEnableConcurrentMode = false) = 0;

        virtual void readPsrCapabilities(vesaPsrSinkCaps *caps) = 0;
        virtual bool updatePsrConfiguration(vesaPsrConfig config) = 0;
        virtual bool readPsrConfiguration(vesaPsrConfig *config) = 0;
        virtual bool readPsrState(vesaPsrState *psrState) = 0;
        virtual bool readPsrDebugInfo(vesaPsrDebugStatus *psrDbgState) = 0;
        virtual bool writePsrErrorStatus(vesaPsrErrorStatus psrErr) = 0;
        virtual bool readPsrErrorStatus(vesaPsrErrorStatus *psrErr) = 0;
        virtual bool writePsrEvtIndicator(vesaPsrEventIndicator psrErr) = 0;
        virtual bool readPsrEvtIndicator(vesaPsrEventIndicator *psrErr) = 0;

        virtual ~DPCDHAL() {}
    };

    //
    // Implement interface
    //
    DPCDHAL * MakeDPCDHAL(AuxBus * bus, Timer * timer);
}

#endif //INCLUDED_DP_CONFIGCAPS_H
@@ -1,450 +0,0 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2010-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/******************************* List **************************************\
*                                                                           *
* Module: dp_linkconfig.h                                                   *
*         Link Configuration object implementation                          *
*                                                                           *
\***************************************************************************/
#ifndef INCLUDED_DP_LINKCONFIG_H
#define INCLUDED_DP_LINKCONFIG_H

#include "dp_auxdefs.h"
#include "dp_internal.h"
#include "dp_watermark.h"
#include "ctrl/ctrl0073/ctrl0073specific.h" // NV0073_CTRL_HDCP_VPRIME_SIZE
#include "displayport.h"

namespace DisplayPort
{
    typedef NvU64 LinkRate;

    class LinkRates : virtual public Object
    {
    public:
        // Store link rate as a multiplier of 270 Mbps to save space
        NvU8 element[NV_DPCD_SUPPORTED_LINK_RATES__SIZE];
        NvU8 entries;

        LinkRates() : entries(0) {}

        void clear()
        {
            entries = 0;
            for (int i = 0; i < NV_DPCD_SUPPORTED_LINK_RATES__SIZE; i++)
            {
                element[i] = 0;
            }
        }

        bool import(NvU8 linkBw)
        {
            if (entries < NV_DPCD_SUPPORTED_LINK_RATES__SIZE)
            {
                element[entries] = linkBw;
                entries++;
                return true;
            }
            else
                return false;
        }

        NvU8 getNumLinkRates()
        {
            return entries;
        }

        LinkRate getLowerRate(LinkRate rate)
        {
            int i;
            NvU8 linkBw = (NvU8)(rate / DP_LINK_BW_FREQ_MULTI_MBPS);

            if ((entries == 0) || (linkBw <= element[0]))
                return 0;

            for (i = entries - 1; i > 0; i--)
            {
                if (linkBw > element[i])
                    break;
            }

            rate = (LinkRate)element[i] * DP_LINK_BW_FREQ_MULTI_MBPS;
            return rate;
        }

        LinkRate getMaxRate()
        {
            LinkRate rate = 0;
            if ((entries > 0) &&
                (entries <= NV_DPCD_SUPPORTED_LINK_RATES__SIZE))
            {
                rate = (LinkRate)element[entries - 1] * DP_LINK_BW_FREQ_MULTI_MBPS;
            }

            return rate;
        }
    };
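
    //
    // Usage sketch (editorial addition, not in the original header): the
    // table is built from raw DPCD SUPPORTED_LINK_RATES entries in ascending
    // order, then walked downward on link-training failure. The values
    // assume DP_LINK_BW_FREQ_MULTI_MBPS converts one DPCD bandwidth unit
    // (0.27 Gbps of link rate) into this file's byte-rate representation.
    //
    //   LinkRates rates;
    //   rates.import(0x06);                         // RBR
    //   rates.import(0x0A);                         // HBR
    //   rates.import(0x14);                         // HBR2
    //   LinkRate peak = rates.getMaxRate();         // 540000000 (HBR2)
    //   LinkRate next = rates.getLowerRate(peak);   // 270000000 (HBR)
    //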

    class LinkPolicy : virtual public Object
    {
        bool bNoFallback;       // No fallback when LT fails
        LinkRates linkRates;

    public:
        LinkPolicy() : bNoFallback(false)
        {
        }
        bool skipFallback()
        {
            return bNoFallback;
        }
        void setSkipFallBack(bool bSkipFallback)
        {
            bNoFallback = bSkipFallback;
        }

        LinkRates *getLinkRates()
        {
            return &linkRates;
        }
    };
    enum
    {
        totalTimeslots = 64,
        totalUsableTimeslots = totalTimeslots - 1
    };

    // Link rates in data bytes per second (the 8b/10b-decoded rate:
    // RBR's 1.62 Gbps link carries 162000000 data bytes per second)
    enum
    {
        RBR         = 162000000,
        EDP_2_16GHZ = 216000000,
        EDP_2_43GHZ = 243000000,
        HBR         = 270000000,
        EDP_3_24GHZ = 324000000,
        EDP_4_32GHZ = 432000000,
        HBR2        = 540000000,
        EDP_6_75GHZ = 675000000,
        HBR3        = 810000000
    };

    struct HDCPState
    {
        bool HDCP_State_Encryption;
        bool HDCP_State_1X_Capable;
        bool HDCP_State_22_Capable;
        bool HDCP_State_Authenticated;
        bool HDCP_State_Repeater_Capable;
    };

    struct HDCPValidateData
    {
    };

    typedef enum
    {
        DP_SINGLE_HEAD_MULTI_STREAM_MODE_NONE,
        DP_SINGLE_HEAD_MULTI_STREAM_MODE_SST,
        DP_SINGLE_HEAD_MULTI_STREAM_MODE_MST,
    } DP_SINGLE_HEAD_MULTI_STREAM_MODE;

    #define HEAD_INVALID_STREAMS 0
    #define HEAD_DEFAULT_STREAMS 1

    typedef enum
    {
        DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_PRIMARY   = 0,
        DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_SECONDARY = 1,
        DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_MAX       = DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID_SECONDARY,
    } DP_SINGLE_HEAD_MULTI_STREAM_PIPELINE_ID;

    #define DP_INVALID_SOR_INDEX 0xFFFFFFFF
    #define DSC_DEPTH_FACTOR     16

    class LinkConfiguration : virtual public Object
    {
    public:
        LinkPolicy policy;
        unsigned lanes;
        LinkRate peakRatePossible;
        LinkRate peakRate;
        LinkRate minRate;
        bool enhancedFraming;
        bool multistream;
        bool disablePostLTRequest;
        bool bEnableFEC;
        bool bDisableLTTPR;

        //
        // The counter to record how many times link training happens.
        // Client can reset the counter by calling setLTCounter(0)
        //
        unsigned linkTrainCounter;

        LinkConfiguration() :
            lanes(0), peakRatePossible(0), peakRate(0), minRate(0),
            enhancedFraming(false), multistream(false), disablePostLTRequest(false),
            bEnableFEC(false), bDisableLTTPR(false), linkTrainCounter(0) {};

        LinkConfiguration(LinkPolicy * p, unsigned lanes, LinkRate peakRate,
                          bool enhancedFraming, bool MST, bool disablePostLTRequest = false,
                          bool bEnableFEC = false, bool bDisableLTTPR = false) :
            lanes(lanes), peakRatePossible(peakRate), peakRate(peakRate),
            enhancedFraming(enhancedFraming), multistream(MST),
            disablePostLTRequest(disablePostLTRequest),
            bEnableFEC(bEnableFEC), bDisableLTTPR(bDisableLTTPR),
            linkTrainCounter(0)
        {
            // downrate for spread and FEC
            minRate = linkOverhead(peakRate);
            if (p)
            {
                policy = *p;
            }
        }

        void setLTCounter(unsigned counter)
        {
            linkTrainCounter = counter;
        }

        unsigned getLTCounter()
        {
            return linkTrainCounter;
        }

        NvU64 linkOverhead(NvU64 rate)
        {
            if (bEnableFEC)
            {
                //
                // If FEC is enabled, we have to account for 3% overhead
                // for FEC+downspread according to the DP 1.4 spec.
                //
                return rate - 3 * rate / 100;
            }
            else
            {
                //
                // If FEC is not enabled, the link overhead comprises only
                // the 0.5% downspread.
                //
                return rate - 5 * rate / 1000;
            }
        }
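
        //
        // Worked example (editorial addition): at HBR2 (540000000 B/s per
        // lane) the integer math above gives
        //   with FEC:    540000000 - 3 * 540000000 / 100  = 523800000
        //   without FEC: 540000000 - 5 * 540000000 / 1000 = 537300000
        // so minRate is the rate still guaranteed after overhead.
        //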

        void enableFEC(bool setFEC)
        {
            bEnableFEC = setFEC;

            // If FEC is enabled, update minRate with FEC+downspread overhead.
            minRate = linkOverhead(peakRate);
        }

        LinkConfiguration(unsigned long TotalLinkPBN)
            : enhancedFraming(true),
              multistream(true),
              disablePostLTRequest(false),
              bEnableFEC(false),
              bDisableLTTPR(false),
              linkTrainCounter(0)
        {
            //
            // Reverse engineer a link configuration from the total link PBN.
            // Note that HBR2 is twice HBR; the table below treats HBR2 x1
            // the same as HBR x2, etc.
            //
            //  BW    Effective lanes   Total link PBN
            //  165         1            195.5555556
            //  165         2            391.1111111
            //  165         4            782.2222222
            //  270         1            320
            //  270         2            640
            //  270         4           1280
            //  270         8           2560
            //
            if (TotalLinkPBN <= 90)
                peakRatePossible = peakRate = RBR, minRate = linkOverhead(RBR), lanes = 0;   // FAIL
            else if (TotalLinkPBN <= 195)
                peakRatePossible = peakRate = RBR, minRate = linkOverhead(RBR), lanes = 1;
            else if (TotalLinkPBN <= 320)
                peakRatePossible = peakRate = HBR, minRate = linkOverhead(HBR), lanes = 1;
            else if (TotalLinkPBN <= 391)
                peakRatePossible = peakRate = RBR, minRate = linkOverhead(RBR), lanes = 2;
            else if (TotalLinkPBN <= 640)
                peakRatePossible = peakRate = HBR, minRate = linkOverhead(HBR), lanes = 2;   // could be HBR2x1, but TotalLinkPBN works out same
            else if (TotalLinkPBN <= 782)
                peakRatePossible = peakRate = RBR, minRate = linkOverhead(RBR), lanes = 4;
            else if (TotalLinkPBN <= 960)
                peakRatePossible = peakRate = HBR3, minRate = linkOverhead(HBR3), lanes = 1;
            else if (TotalLinkPBN <= 1280)
                peakRatePossible = peakRate = HBR, minRate = linkOverhead(HBR), lanes = 4;   // could be HBR2x2
            else if (TotalLinkPBN <= 1920)
                peakRatePossible = peakRate = HBR3, minRate = linkOverhead(HBR3), lanes = 2; // could be HBR2x
            else if (TotalLinkPBN <= 2560)
                peakRatePossible = peakRate = HBR2, minRate = linkOverhead(HBR2), lanes = 4;
            else if (TotalLinkPBN <= 3840)
                peakRatePossible = peakRate = HBR3, minRate = linkOverhead(HBR3), lanes = 4;
            else {
                peakRatePossible = peakRate = RBR, minRate = linkOverhead(RBR), lanes = 0;   // FAIL
                DP_ASSERT(0 && "Unknown configuration");
            }
        }
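
        //
        // Usage sketch (editorial addition): a branch reporting a total
        // link PBN of 640 maps back to HBR x2,
        //
        //   LinkConfiguration lc(640);
        //   // lc.peakRate == HBR (270000000), lc.lanes == 2
        //
        // which matches the 270/2-lane row of the table above.
        //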

        void setEnhancedFraming(bool newEnhancedFraming)
        {
            enhancedFraming = newEnhancedFraming;
        }

        bool isValid()
        {
            return lanes != laneCount_0;
        }

        bool lowerConfig(bool bReduceLaneCnt = false)
        {
            //
            // TODO: bReduceLaneCnt is set to fall back to 4 lanes at the
            //       next lower valid link rate, but we should instead reset
            //       to the max lane count the sink supports.
            //

            LinkRate lowerRate = policy.getLinkRates()->getLowerRate(peakRate);

            if (bReduceLaneCnt)
            {
                // Reduce laneCount before reducing linkRate
                if (lanes == laneCount_1)
                {
                    if (lowerRate)
                    {
                        lanes = laneCount_4;
                        peakRate = lowerRate;
                    }
                    else
                    {
                        lanes = laneCount_0;
                    }
                }
                else
                {
                    lanes /= 2;
                }
            }
            else
            {
                // Reduce the link rate instead of lane count
                if (lowerRate)
                {
                    peakRate = lowerRate;
                }
                else
                {
                    lanes /= 2;
                }
            }

            minRate = linkOverhead(peakRate);

            return lanes != laneCount_0;
        }
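
        //
        // Fallback walk-through (editorial addition), assuming the policy
        // table holds { RBR, HBR, HBR2 } and training starts at HBR2 x4:
        // successive lowerConfig() calls yield HBR x4, then RBR x4; once no
        // lower rate exists, the lane count halves (x2, then x1) until
        // laneCount_0 makes the configuration invalid.
        //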

        void setLaneRate(LinkRate newRate, unsigned newLanes)
        {
            peakRate = newRate;
            lanes = newLanes;
            minRate = linkOverhead(peakRate);
        }

        unsigned pbnTotal()
        {
            return PBNForSlots(totalUsableTimeslots);
        }

        void pbnRequired(const ModesetInfo & modesetInfo, unsigned & base_pbn, unsigned & slots, unsigned & slots_pbn)
        {
            base_pbn = pbnForMode(modesetInfo);
            slots = slotsForPBN(base_pbn);
            slots_pbn = PBNForSlots(slots);
        }

        NvU32 slotsForPBN(NvU32 allocatedPBN, bool usable = false)
        {
            NvU64 bytes_per_pbn      = 54 * 1000000 / 64;     // this comes out exact
            NvU64 bytes_per_timeslot = peakRate * lanes / 64;

            if (bytes_per_timeslot == 0)
                return (NvU32)-1;

            if (usable)
            {
                // round down to find the usable integral slots for a given value of PBN.
                NvU32 slots = (NvU32)divide_floor(allocatedPBN * bytes_per_pbn, bytes_per_timeslot);
                DP_ASSERT(slots <= 64);

                return slots;
            }
            else
                return (NvU32)divide_ceil(allocatedPBN * bytes_per_pbn, bytes_per_timeslot);
        }

        NvU32 PBNForSlots(NvU32 slots)                        // Rounded down
        {
            NvU64 bytes_per_pbn      = 54 * 1000000 / 64;     // this comes out exact
            NvU64 bytes_per_timeslot = peakRate * lanes / 64;

            return (NvU32)(bytes_per_timeslot * slots / bytes_per_pbn);
        }
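
        //
        // Worked example (editorial addition): bytes_per_pbn is
        // 54000000 / 64 = 843750 exactly. At HBR2 x4,
        // bytes_per_timeslot = 540000000 * 4 / 64 = 33750000, so
        // PBNForSlots(63) = 33750000 * 63 / 843750 = 2520, which is what
        // pbnTotal() returns for that configuration (one of the 64
        // timeslots is reserved, hence totalUsableTimeslots = 63).
        //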

        bool operator!= (const LinkConfiguration & right) const
        {
            return !(*this == right);
        }

        bool operator== (const LinkConfiguration & right) const
        {
            return (this->lanes == right.lanes &&
                    this->peakRate == right.peakRate &&
                    this->enhancedFraming == right.enhancedFraming &&
                    this->multistream == right.multistream &&
                    this->bEnableFEC == right.bEnableFEC);
        }

        bool operator< (const LinkConfiguration & right) const
        {
            NvU64 leftMKBps  = peakRate * lanes;
            NvU64 rightMKBps = right.peakRate * right.lanes;

            if (leftMKBps == rightMKBps)
            {
                return (lanes < right.lanes);
            }
            else
            {
                return (leftMKBps < rightMKBps);
            }
        }
    };
}
#endif //INCLUDED_DP_LINKCONFIG_H
File diff suppressed because it is too large
@@ -1,331 +0,0 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/******************************* DisplayPort *******************************\
*                                                                           *
* Module: dp_groupimpl.cpp                                                  *
*         DP device group implementation                                    *
*                                                                           *
\***************************************************************************/

#include "dp_internal.h"
#include "dp_connector.h"
#include "dp_list.h"
#include "dp_auxdefs.h"
#include "dp_deviceimpl.h"
#include "dp_groupimpl.h"
#include "dp_connectorimpl.h"

using namespace DisplayPort;

void GroupImpl::update(Device * dev, bool allocationState)
{
    Address::StringBuffer sb;
    Address devAddress = dev->getTopologyAddress();
    DP_USED(sb);

    // Do not map a stream that is not yet turned on in the GPU. An update shall be sent later during NAE.
    if (allocationState && !this->isHeadAttached())
        return;

    //
    // Do not enable the stream on an unplugged device, but take care of
    // detached devices. We need to clear PBNs allocated by such devices.
    //
    if (allocationState && !((DeviceImpl *)dev)->plugged)
        return;

    //
    // Check whether the parent's messageManager exists. This is required for
    // cases where the parent branch itself has been unplugged; no message can
    // be sent in that case.
    //
    if (!parent->messageManager)
        return;

    if (timeslot.count == 0 ||
        ((DeviceImpl *)dev)->payloadAllocated == allocationState)
        return;

    if (!dev->getParent() || !((dev->getParent())->isPlugged()))
    {
        DeviceImpl * parentDev = NULL;

        //
        // Send ALLOCATE_PAYLOAD with pbn 0 to the parent port of the previous
        // branch: find the first plugged parent branch and send the message to it.
        //
        while (devAddress.size() > 2)
        {
            devAddress.pop();
            parentDev = parent->findDeviceInList(devAddress.parent());

            if (parentDev && parentDev->isPlugged())
                break;
        }

        // If no parent is found, simply return: we don't have a valid address to send the message to.
        if (!parentDev)
            return;
    }

    NakData nakData;
    for (int retries = 0; retries < 7; retries++)
    {
        AllocatePayloadMessage allocate;
        unsigned sink = 0;      // hardcode the audio sink to the 0th one in the device.
        allocate.set(devAddress.parent(), devAddress.tail(),
                     dev->isAudioSink() ? 1 : 0, streamIndex, allocationState ? timeslot.PBN : 0,
                     &sink, true);

        // Trigger a refetch of epr
        ((DeviceImpl *)dev)->bandwidth.enum_path.dataValid = false;
        DeviceImpl * tail = (DeviceImpl *) dev;
        while (tail && tail->getParent())
        {
            tail->bandwidth.enum_path.dataValid = false;
            tail = (DeviceImpl *)tail->getParent();
        }

        if (parent->messageManager->send(&allocate, nakData))
        {
            if (allocationState)
            {
                DP_LOG(("DP-TM> Attached stream:%d to %s", streamIndex, dev->getTopologyAddress().toString(sb)));
            }
            else
            {
                DP_LOG(("DP-TM> Detached stream:%d from %s", streamIndex, dev->getTopologyAddress().toString(sb)));
            }

            ((DeviceImpl *)dev)->payloadAllocated = allocationState;

            return;
        }
    }

    // Ideally we should not have reached here unless allocate payload failed.
    if (allocationState)
    {
        DP_LOG(("DP-TM> Allocate_payload: Failed to ATTACH stream:%d to %s", streamIndex, dev->getTopologyAddress().toString(sb)));
        DP_ASSERT(0);
    }
    else
    {
        DP_LOG(("DP-TM> Allocate_payload: Failed to DETACH stream:%d from %s", streamIndex, dev->getTopologyAddress().toString(sb)));
        DP_ASSERT(0);
    }
}

void GroupImpl::insert(Device * dev)
{
    DP_ASSERT(!headInFirmware && "Cannot add or remove from a firmware group. You must perform a modeset away from the device");
    DeviceImpl * di = (DeviceImpl *)dev;

    if (isHeadAttached())
    {
        if (di->activeGroup && di->activeGroup != this)
        {
            DP_ASSERT(0 && "Device already in active group, cannot add to another active group!");
            return;
        }
        di->activeGroup = this;
    }

    members.insertFront(di);

    update(dev, true);
}

void GroupImpl::remove(Device * dev)
{
    DP_ASSERT(!headInFirmware && "Cannot add or remove from a firmware group. You must perform a modeset away from the device");

    DeviceImpl * di = (DeviceImpl *)dev;

    if (isHeadAttached())
    {
        di->activeGroup = 0;
    }
    members.remove(di);

    update(dev, false);

    updateVbiosScratchRegister(dev);
}

void GroupImpl::destroy()
{
    for (Device * i = enumDevices(0); i; i = enumDevices(i))
        remove(i);

    // Cancel any queued auth callbacks.
    cancelHdcpCallbacks();

    delete this;
}

void GroupImpl::cancelHdcpCallbacks()
{
    authRetries = 0;

    parent->timer->cancelCallback(this, &tagHDCPReauthentication);
    parent->timer->cancelCallback(this, &tagStreamValidation);
}

Device * GroupImpl::enumDevices(Device * previousDevice)
{
    return members.next(previousDevice);
}

void GroupImpl::expired(const void * tag)
{
    if (tag == &tagHDCPReauthentication)
    {
        HDCPState hdcpState = {0};
        parent->main->configureHDCPGetHDCPState(hdcpState);

        if (authRetries < HDCP_AUTHENTICATION_RETRIES)
        {
            this->hdcpEnabled = hdcpState.HDCP_State_Encryption;
            if (hdcpState.HDCP_State_Authenticated)
            {
                parent->isHDCPAuthOn = true;
                authRetries = 0;
            }
            else
            {
                unsigned authDelay = (hdcpState.HDCP_State_22_Capable ?
                                      HDCP22_AUTHENTICATION_COOLDOWN : HDCP_AUTHENTICATION_COOLDOWN);

                authRetries++;
                parent->main->configureHDCPRenegotiate();
                parent->isHDCPAuthOn = false;
                parent->timer->queueCallback(this, &tagHDCPReauthentication,
                                             authDelay);
            }
        }
        else
        {
            parent->isHDCPAuthOn = this->hdcpEnabled = false;
        }
    }
    else if (tag == &tagStreamValidation)
    {
        if (!(this->streamValidationDone))
        {
            // If we get here, we need to debug why DD never sent the final notification.
            DP_ASSERT(0 && "DP> Didn't get final notification.");
        }
    }
}

bool GroupImpl::hdcpGetEncrypted()
{
    //
    // Returns whether encryption is currently enabled
    //
    if (parent->isHDCPAuthOn)
    {
        return this->hdcpEnabled;
    }
    else
    {
        return false;
    }
}

void GroupImpl::updateVbiosScratchRegister(Device * lastDev)
{
    if (!parent->bDisableVbiosScratchRegisterUpdate &&
        parent->lastDeviceSetForVbios == lastDev)
    {
        // Take a device which is part of a group
        for (ListElement * e = parent->deviceList.begin();
             e != parent->deviceList.end(); e = e->next)
        {
            DeviceImpl * dev = (DeviceImpl *)e;

            if (dev->activeGroup && dev->activeGroup->isHeadAttached())
            {
                NvU32 address = 0;
                NvU32 addrSize = dev->getTopologyAddress().size();

                // Set the MS_SCRATCH_REGISTER for lighted up display
                for (NvU32 i = addrSize; i; --i)
                {
                    address |= ((dev->address[i-1] & 0xF) << ((addrSize - i)*4));
                }

                parent->main->configureMsScratchRegisters(address, addrSize, 3);

                parent->lastDeviceSetForVbios = (Device *)dev;

                return;
            }
        }
    }
}
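
//
// Worked example (editorial addition): a device at topology address 1.2.3
// (addrSize == 3) packs into the scratch value nibble by nibble, first hop
// in the highest nibble:
//
//   i == 3: address |= (3 & 0xF) << 0   -> 0x003
//   i == 2: address |= (2 & 0xF) << 4   -> 0x023
//   i == 1: address |= (1 & 0xF) << 8   -> 0x123
//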

//
// Helper function for attaching and detaching heads.
//
// For attach, we will assert if the group already has a head attached but,
// for some device in the group, the active group did not point to the
// current group.
// For detach, we will assert if the group does not have a head attached but
// some device in the group has an active group, OR the head is marked
// attached but not all devices in the group have the current group as their
// active group.
// This also sets or clears dev->activeGroup for each contained device.
//
void GroupImpl::setHeadAttached(bool attached)
{
    for (Device * i = enumDevices(0); i; i = enumDevices(i))
    {
        DeviceImpl *di = (DeviceImpl *)i;

        if (attached)
        {
            if (headAttached)
            {
                DP_ASSERT(di->activeGroup == this);
            }
            di->activeGroup = this;
        }
        else
        {
            if (!headAttached)
            {
                DP_ASSERT(di->activeGroup == NULL);
            }
            else
            {
                DP_ASSERT(di->activeGroup == this);
            }
            di->activeGroup = NULL;
        }
    }
    headAttached = attached;
}
@@ -1,48 +0,0 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#define NV_DPCD20_DSC_SUPPORT                                   (0x00000060) /* R-XUR */
#define NV_DPCD20_DSC_SUPPORT_PASS_THROUGH_SUPPORT              1:1          /* R-XUF */
#define NV_DPCD20_DSC_SUPPORT_PASS_THROUGH_SUPPORT_NO           (0x00000000) /* R-XUV */
#define NV_DPCD20_DSC_SUPPORT_PASS_THROUGH_SUPPORT_YES          (0x00000001) /* R-XUV */

#define NV_DPCD20_DSC_PASS_THROUGH                              (0x00000160) /* R-XUR */
#define NV_DPCD20_DSC_PASS_THROUGH_ENABLE                       1:1          /* R-XUF */
#define NV_DPCD20_DSC_PASS_THROUGH_ENABLE_NO                    (0x00000000) /* R-XUV */
#define NV_DPCD20_DSC_PASS_THROUGH_ENABLE_YES                   (0x00000001) /* R-XUV */

#define NV_DPCD20_GUID_2                                        (0x00000040) /* R-XUR */

// PANEL REPLAY RELATED DPCD
#define NV_DPCD20_PANEL_REPLAY_CAPABILITY                       (0x000000B0)
#define NV_DPCD20_PANEL_REPLAY_CAPABILITY_SUPPORTED             0:0
#define NV_DPCD20_PANEL_REPLAY_CAPABILITY_SUPPORTED_NO          (0x00000000)
#define NV_DPCD20_PANEL_REPLAY_CAPABILITY_SUPPORTED_YES         (0x00000001)
#define NV_DPCD20_PANEL_REPLAY_CAPABILITY_SEL_UPDATE            1:1
#define NV_DPCD20_PANEL_REPLAY_CAPABILITY_SEL_UPDATE_NO         (0x00000000)
#define NV_DPCD20_PANEL_REPLAY_CAPABILITY_SEL_UPDATE_YES        (0x00000001)

#define NV_DPCD20_PANEL_REPLAY_CONFIGURATION                    (0x000001B0)
#define NV_DPCD20_PANEL_REPLAY_CONFIGURATION_ENABLE_PR_MODE     0:0
#define NV_DPCD20_PANEL_REPLAY_CONFIGURATION_ENABLE_PR_MODE_NO  (0x00000000)
#define NV_DPCD20_PANEL_REPLAY_CONFIGURATION_ENABLE_PR_MODE_YES (0x00000001)
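
//
// Usage sketch (editorial addition): these definitions use the NV field
// notation (high:low bit ranges), so they are meant to compose with the
// DRF macro family used elsewhere in this driver (e.g. FLD_TEST_DRF in
// dp_configcaps.h). The exact macro arguments below are an assumption:
//
//   NvU8 byte = /* AUX read of DPCD offset 0x60 */;
//   if (FLD_TEST_DRF(_DPCD20, _DSC_SUPPORT, _PASS_THROUGH_SUPPORT, _YES, byte))
//       ;   // sink supports DSC pass-through
//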
@@ -1,67 +0,0 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2003-2021 NVIDIA CORPORATION & AFFILIATES
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef __v03_00_dev_disp_h__
#define __v03_00_dev_disp_h__
#define NV_PDISP_CHN_NUM_CORE                        0                   /* */
#define NV_PDISP_CHN_NUM_WIN(i)                      (1+(i))             /* */
#define NV_PDISP_CHN_NUM_WIN__SIZE_1                 32                  /* */
#define NV_PDISP_CHN_NUM_WINIM(i)                    (33+(i))            /* */
#define NV_PDISP_CHN_NUM_WINIM__SIZE_1               32                  /* */
#define NV_PDISP_CHN_NUM_CURS(i)                     (73+(i))            /* */
#define NV_PDISP_CHN_NUM_CURS__SIZE_1                8                   /* */
#define NV_PDISP_FE_HW_SYS_CAP_HEAD_EXISTS(i)        (0+(i)):(0+(i))     /* R--VF */
#define NV_PDISP_FE_HW_SYS_CAP_HEAD_EXISTS__SIZE_1   8                   /* */
#define NV_PDISP_FE_HW_SYS_CAP_HEAD_EXISTS_YES       0x00000001          /* R---V */
#define NV_PDISP_FE_SW                               0x00640FFF:0x00640000 /* RW--D */
#define NV_PDISP_SF_USER_0                           0x006F03FF:0x006F0000 /* RW--D */
#define NV_UDISP_HASH_BASE                           0x00000000          /* */
#define NV_UDISP_HASH_LIMIT                          0x00001FFF          /* */
#define NV_UDISP_OBJ_MEM_BASE                        0x00002000          /* */
#define NV_UDISP_OBJ_MEM_LIMIT                       0x0000FFFF          /* */
#define NV_UDISP_HASH_TBL_CLIENT_ID                  (1*32+13):(1*32+0)  /* RWXVF */
#define NV_UDISP_HASH_TBL_INSTANCE                   (1*32+24):(1*32+14) /* RWXUF */
#define NV_UDISP_HASH_TBL_CHN                        (1*32+31):(1*32+25) /* RWXUF */
#define NV_DMA_TARGET_NODE                           (0*32+1):(0*32+0)   /* RWXVF */
#define NV_DMA_TARGET_NODE_PHYSICAL_NVM              0x00000001          /* RW--V */
#define NV_DMA_TARGET_NODE_PHYSICAL_PCI              0x00000002          /* RW--V */
#define NV_DMA_TARGET_NODE_PHYSICAL_PCI_COHERENT     0x00000003          /* RW--V */
#define NV_DMA_ACCESS                                (0*32+2):(0*32+2)   /* RWXVF */
#define NV_DMA_ACCESS_READ_ONLY                      0x00000000          /* RW--V */
#define NV_DMA_ACCESS_READ_AND_WRITE                 0x00000001          /* RW--V */
#define NV_DMA_KIND                                  (0*32+20):(0*32+20) /* RWXVF */
#define NV_DMA_KIND_PITCH                            0x00000000          /* RW--V */
#define NV_DMA_KIND_BLOCKLINEAR                      0x00000001          /* RW--V */
#define NV_DMA_ADDRESS_BASE_LO                       (1*32+31):(1*32+0)  /* RWXUF */
#define NV_DMA_ADDRESS_BASE_HI                       (2*32+6):(2*32+0)   /* RWXUF */
#define NV_DMA_ADDRESS_LIMIT_LO                      (3*32+31):(3*32+0)  /* RWXUF */
#define NV_DMA_ADDRESS_LIMIT_HI                      (4*32+6):(4*32+0)   /* RWXUF */
#define NV_DMA_SIZE                                  20                  /* */
#define NV_UDISP_FE_CHN_ASSY_BASEADR_CORE            0x00680000          /* */
#define NV_UDISP_FE_CHN_ARMED_BASEADR_CORE           (0x00680000+32768)  /* */
#define NV_UDISP_FE_CHN_ASSY_BASEADR_WIN(i)          ((0x00690000+(i)*4096))      /* */
#define NV_UDISP_FE_CHN_ASSY_BASEADR_WINIM(i)        ((0x00690000+((i+32)*4096))) /* */
#define NV_UDISP_FE_CHN_ASSY_BASEADR_CURS(i)         (0x006D8000+(i)*4096)        /* RW-4A */
#define NV_UDISP_FE_CHN_ASSY_BASEADR(i)              ((i)>0?(((0x00690000+(i-1)*4096))):0x00680000) /* */
#define NV_UDISP_FE_CHN_ASSY_BASEADR__SIZE_1         81                  /* */

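//
// Worked example (editorial addition): the channel numbers and assembly
// base addresses line up as
//
//   NV_PDISP_CHN_NUM_CORE           == 0
//   NV_PDISP_CHN_NUM_WIN(0)         == 1          (windows: 1..32)
//   NV_PDISP_CHN_NUM_WINIM(0)       == 33         (window imm: 33..64)
//   NV_PDISP_CHN_NUM_CURS(0)        == 73         (cursors: 73..80)
//   NV_UDISP_FE_CHN_ASSY_BASEADR(0) == 0x00680000 (core)
//   NV_UDISP_FE_CHN_ASSY_BASEADR(1) == 0x00690000 (window 0)
//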
#endif // __v03_00_dev_disp_h__
@@ -1,69 +0,0 @@
/*
 * Copyright (c) 2018-2018, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef _cl00c1_h_
#define _cl00c1_h_

#ifdef __cplusplus
extern "C" {
#endif

#include "nvtypes.h"
#include "nvlimits.h"

#define NV_FB_SEGMENT (0x000000C1)

/*
 * NV_FB_SEGMENT_ALLOCATION_PARAMS - Allocation params to create FB segment through
 * NvRmAlloc.
 */
typedef struct
{
    NvHandle hCtxDma;                                   // unused
    NvU32    subDeviceIDMask NV_ALIGN_BYTES(8);
    NvU64    dmaOffset NV_ALIGN_BYTES(8);               // unused
    NvU64    VidOffset NV_ALIGN_BYTES(8);
    NvU64    Offset NV_ALIGN_BYTES(8);                  // To be deprecated
    NvU64    pOffset[NV_MAX_SUBDEVICES] NV_ALIGN_BYTES(8);
    NvU64    Length NV_ALIGN_BYTES(8);
    NvU64    ValidLength NV_ALIGN_BYTES(8);
    NvP64    pPageArray NV_ALIGN_BYTES(8);
    NvU32    startPageIndex;
    NvHandle AllocHintHandle;
    NvU32    Flags;
    NvHandle hMemory;                                   // Not used in NvRmAlloc path; only used in CTRL path
    NvHandle hClient;                                   // Not used in NvRmAlloc path; only used in CTRL path
    NvHandle hDevice;                                   // Not used in NvRmAlloc path; only used in CTRL path
    NvP64    pCpuAddress NV_ALIGN_BYTES(8);             // To be deprecated
    NvP64    ppCpuAddress[NV_MAX_SUBDEVICES] NV_ALIGN_BYTES(8);
    NvU64    GpuAddress NV_ALIGN_BYTES(8);              // To be deprecated
    NvU64    pGpuAddress[NV_MAX_SUBDEVICES] NV_ALIGN_BYTES(8);
    NvHandle hAllocHintClient;
    NvU32    kind;
    NvU32    compTag;
} NV_FB_SEGMENT_ALLOCATION_PARAMS;

#ifdef __cplusplus
}; /* extern "C" */
#endif

#endif /* _cl00c1_h_ */
@@ -1,244 +0,0 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2017-2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef _cl90cd_h_
#define _cl90cd_h_

#ifdef __cplusplus
extern "C" {
#endif

/*
 * NV_EVENT_BUFFER
 *   An event buffer is shared between user (RO) and kernel (RW).
 *   It holds debug/profile event data provided by the kernel.
 */
#define NV_EVENT_BUFFER (0x000090CD)

/*
 * NV_EVENT_BUFFER_HEADER
 *   This structure holds the get and put values used to index/consume the
 *   event buffer, along with other RO data shared with the user.
 *
 *   recordGet/Put: These "pointers" work in the traditional sense:
 *      - when GET==PUT, the fifo is empty
 *      - when GET==PUT+1, the fifo is full
 *      This implies a full fifo always has one "wasted" element.
 *
 *   recordCount: This is the total number of records added to the buffer by the kernel.
 *      This information is filled out when the buffer is set up to keep the newest records.
 *      recordCount = number of records currently in the buffer + overflow count.
 *
 *   recordDropcount: This is the number of event records that are dropped because the
 *      buffer is full.
 *      This information is filled out when the event buffer is set up to keep the oldest records.
 *
 *   vardataDropcount: The event buffer provides a dual stream of data, where the record can
 *      contain an optional offset to a variable length data buffer.
 *      This is the number of variable data records that are dropped because the
 *      buffer is full.
 *      This information is filled out when the event buffer is set up to keep the oldest records.
 */
typedef struct
{
    NvU32 recordGet;
    NvU32 recordPut;
    NvU64 recordCount;
    NvU64 recordDropcount;
    NvU64 vardataDropcount;
} NV_EVENT_BUFFER_HEADER;

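/*
 * Occupancy sketch (editorial addition): with the GET/PUT convention above,
 * the number of records currently queued in a buffer of "size" slots (an
 * assumed local name for the allocation's recordCount) is
 *
 *     used = (put >= get) ? (put - get) : (size - get + put);
 *
 * and the buffer is full when ((put + 1) % size) == get, which is why one
 * element is always "wasted".
 */
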
/*
 * NV_EVENT_BUFFER_RECORD_HEADER
 *   This is the header added to each event record.
 *   This helps identify the event type and whether variable length data is
 *   associated with it.
 */
typedef struct
{
    NvU16 type;
    NvU16 subtype;
    NvU32 varData;  // [31: 5] = (varDataOffset >> 5); 0 < vardataOffset <= vardataBufferSize
                    // [ 4: 1] = reserved for future use
                    // [ 0: 0] = isVardataStartOffsetZero
} NV_EVENT_BUFFER_RECORD_HEADER;

/*
 * NV_EVENT_BUFFER_RECORD
 *   This structure defines a generic event record.
 *   The size of this record is fixed for a given event buffer.
 *   It is configured by the user during allocation.
 */
typedef struct
{
    NV_EVENT_BUFFER_RECORD_HEADER recordHeader;
    NvU64 inlinePayload[1] NV_ALIGN_BYTES(8);   // 1st element of the payload/data
    // Do not add more elements here, inlinePayload can contain more than one element
} NV_EVENT_BUFFER_RECORD;

#define NV_EVENT_VARDATA_GRANULARITY        32
#define NV_EVENT_VARDATA_OFFSET_MASK        (~(NV_EVENT_VARDATA_GRANULARITY - 1))
#define NV_EVENT_VARDATA_START_OFFSET_ZERO  0x01
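
/*
 * Decode sketch (editorial addition): because the vardata offset is stored
 * 32-byte aligned in varData[31:5], masking recovers the byte offset
 * directly ("rec" is an assumed local of type NV_EVENT_BUFFER_RECORD_HEADER):
 *
 *     NvU32 offset    = rec.varData & NV_EVENT_VARDATA_OFFSET_MASK;
 *     NvU32 zeroStart = rec.varData & NV_EVENT_VARDATA_START_OFFSET_ZERO;
 */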
|
||||
|
||||
/*
 * NV_EVENT_BUFFER_ALLOC_PARAMETERS
 *
 * bufferHeader [OUT]
 *     This is the user VA offset pointing to the base of NV_EVENT_BUFFER_HEADER.
 *
 * recordBuffer [OUT]
 *     This is the user VA offset pointing to the base of the event record buffer.
 *     This buffer will contain NV_EVENT_BUFFER_RECORDs added by the kernel.
 *
 * recordSize [IN]
 *     This is the size of NV_EVENT_BUFFER_RECORD used by this buffer.
 *
 * recordCount [IN]
 *     This is the number of records that recordBuffer can hold.
 *
 * vardataBuffer [OUT]
 *     This is the user VA offset pointing to the base of the variable data buffer.
 *
 * vardataBufferSize [IN]
 *     Size of the variable data buffer in bytes.
 *
 * recordsFreeThreshold [IN]
 *     This is the notification threshold for the event record buffer.
 *     This field specifies the number of records that the buffer can
 *     still hold before it gets full.
 *
 * vardataFreeThreshold [IN]
 *     This is the notification threshold for the vardata buffer.
 *     This field specifies the number of bytes that the buffer can
 *     still hold before it gets full.
 *
 * notificationHandle [IN]
 *     When recordsFreeThreshold or vardataFreeThreshold is met, the kernel will
 *     notify the user on this handle. If notificationHandle is NULL, event
 *     notification is disabled. This is an OS-specific notification handle:
 *     a Windows event handle or an fd pointer on Linux.
 *
 * hSubDevice [IN]
 *     An event buffer can hold either sub-device related events or system events.
 *     This handle specifies the sub-device to associate this buffer with.
 *     If this parameter is NULL, then the buffer is tied to the client instead.
 *
 * flags [IN]
 *     Set to 0 by default.
 *     This field can hold any future flags to configure the buffer if needed.
 *
 * hBufferHeader [IN]
 *     The backing memory object for the buffer header. Must be a NV01_MEMORY_DEVICELESS object.
 *     On Windows platforms, a buffer will be internally generated if hBufferHeader is 0.
 *
 * hRecordBuffer [IN]
 *     The backing memory object for the record buffer. Must be a NV01_MEMORY_DEVICELESS object.
 *     On Windows platforms, a buffer will be internally generated if hRecordBuffer is 0.
 *
 * hVardataBuffer [IN]
 *     The backing memory object for the vardata buffer. Must be a NV01_MEMORY_DEVICELESS object.
 *     On Windows platforms, a buffer will be internally generated if hVardataBuffer is 0.
 */
typedef struct
{
    NvP64    bufferHeader NV_ALIGN_BYTES(8);
    NvP64    recordBuffer NV_ALIGN_BYTES(8);
    NvU32    recordSize;
    NvU32    recordCount;
    NvP64    vardataBuffer NV_ALIGN_BYTES(8);
    NvU32    vardataBufferSize;
    NvU32    recordsFreeThreshold;
    NvU64    notificationHandle NV_ALIGN_BYTES(8);
    NvU32    vardataFreeThreshold;
    NvHandle hSubDevice;
    NvU32    flags;

    NvHandle hBufferHeader;
    NvHandle hRecordBuffer;
    NvHandle hVardataBuffer;
} NV_EVENT_BUFFER_ALLOC_PARAMETERS;
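
/*
 * Editor's sketch (not part of the original header): populating the allocation
 * parameters for a buffer of 256 fixed-size records plus 64 KB of vardata.
 * rmApiAlloc() and the eventBufferClass argument are hypothetical stand-ins for
 * whatever allocation entry point and class ID the caller actually uses.
 */
extern NV_STATUS rmApiAlloc(NvHandle hClient, NvHandle hParent, NvHandle hObject,
                            NvU32 hClass, void *pAllocParams);  // assumed wrapper

static NV_STATUS allocEventBufferExample(NvHandle hClient, NvHandle hParent,
                                         NvHandle hEventBuffer, NvU32 eventBufferClass)
{
    NV_EVENT_BUFFER_ALLOC_PARAMETERS params = {0};

    params.recordSize        = sizeof(NV_EVENT_BUFFER_RECORD);
    params.recordCount       = 256;
    params.vardataBufferSize = 64 * 1024;

    // Notify when fewer than 16 records (or 4 KB of vardata) remain free.
    params.recordsFreeThreshold = 16;
    params.vardataFreeThreshold = 4 * 1024;
    params.notificationHandle   = 0;   // NULL disables notification
    params.hSubDevice           = 0;   // 0 ties the buffer to the client

    // bufferHeader, recordBuffer and vardataBuffer come back as user VA offsets.
    return rmApiAlloc(hClient, hParent, hEventBuffer, eventBufferClass, &params);
}
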
/*
 * NV_EVENT_BUFFER_BIND
 * This class is used to allocate an Event Type object bound to a given event buffer.
 * This allocation call associates an event type with an event buffer.
 * Multiple event types can be associated with the same buffer as long as they belong
 * to the same category, i.e., either sub-device or system.
 * When the event buffer is enabled and an event bound to this buffer occurs, the
 * relevant data gets added to the buffer.
 * cl2080.h has a list of sub-device events that can be associated with a buffer.
 * cl0000.h has a list of system events that can be associated with a buffer.
 * These defines are also used in class NV01_EVENT_OS_EVENT (0x79) to get event
 * notification and class NV01_EVENT_KERNEL_CALLBACK_EX (0x7E) to get kernel callbacks.
 * This class extends that support to additionally get relevant data in an event buffer.
 */
#define NV_EVENT_BUFFER_BIND (0x0000007F)

/*
 * NV_EVENT_BUFFER_BIND_PARAMETERS
 *
 * bufferHandle [IN]
 *     Event buffer handle used to bind the given event type.
 *
 * eventType [IN]
 *     This is one of the eventTypeIDs from cl2080.h/cl0000.h,
 *     e.g. NV2080_NOTIFIERS_PSTATE_CHANGE.
 *
 * eventSubtype [IN]
 *     Event subtype for a given type of event.
 *     This field is optional, depending on whether the event type has a subtype.
 *
 * hClientTarget [IN]
 *     Handle of the target client whose events are to be bound to the given buffer,
 *     e.g. context switch events can be tracked for a given client.
 *     This field is optional depending on the event type,
 *     e.g. pstate change events are per GPU and do not depend on a client.
 *
 * hSrcResource [IN]
 *     Source resource handle for the event type,
 *     e.g. channel handle: RC/context switch can be tracked for a given channel.
 *     This field is optional depending on the event type,
 *     e.g. pstate change events are per GPU and cannot be sub-categorized.
 *
 * KernelCallbackdata [IN]
 *     This field is reserved for KERNEL ONLY clients.
 */
typedef struct
{
    NvHandle bufferHandle;
    NvU16    eventType;
    NvU16    eventSubtype;
    NvHandle hClientTarget;
    NvHandle hSrcResource;
    NvP64    KernelCallbackdata NV_ALIGN_BYTES(8);
} NV_EVENT_BUFFER_BIND_PARAMETERS;
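
/*
 * Editor's sketch (not part of the original header): binding a sub-device event
 * type to an existing event buffer. rmApiAlloc() is the same hypothetical
 * wrapper assumed in the earlier sketch; hNewEventBind is a fresh handle chosen
 * by the caller.
 */
static NV_STATUS bindPstateEventsExample(NvHandle hClient, NvHandle hSubDevice,
                                         NvHandle hEventBuffer, NvHandle hNewEventBind,
                                         NvU16 pstateChangeEventType)
{
    NV_EVENT_BUFFER_BIND_PARAMETERS params = {0};

    params.bufferHandle = hEventBuffer;
    params.eventType    = pstateChangeEventType;  // e.g. NV2080_NOTIFIERS_PSTATE_CHANGE
    // eventSubtype, hClientTarget and hSrcResource are optional for this event type.

    return rmApiAlloc(hClient, hSubDevice, hNewEventBind, NV_EVENT_BUFFER_BIND, &params);
}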

#ifdef __cplusplus
}; /* extern "C" */
#endif

#endif // _cl90cd_h_

@@ -1,240 +0,0 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2005-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#pragma once

#include <nvtypes.h>

//
// This file was generated with FINN, an NVIDIA coding tool.
// Source file: ctrl/ctrl2080/ctrl2080bios.finn
//

#include "ctrl/ctrl2080/ctrl2080base.h"

/* NV20_SUBDEVICE_XX bios-related control commands and parameters */

typedef struct NV2080_CTRL_BIOS_INFO {
    NvU32 index;
    NvU32 data;
} NV2080_CTRL_BIOS_INFO;

/* Maximum number of bios infos that can be queried at once */
#define NV2080_CTRL_BIOS_INFO_MAX_SIZE           (0x0000000F)

#define NV2080_CTRL_BIOS_INFO_INDEX_REVISION     (0x00000000)
#define NV2080_CTRL_BIOS_INFO_INDEX_OEM_REVISION (0x00000001)

/*
 * NV2080_CTRL_CMD_BIOS_GET_INFO
 *
 * This command returns bios information for the associated GPU.
 * Requests to retrieve bios information use a list of one or more
 * NV2080_CTRL_BIOS_INFO structures.
 *
 *   biosInfoListSize
 *     This field specifies the number of entries on the caller's
 *     biosInfoList.
 *   biosInfoList
 *     This field specifies a pointer in the caller's address space
 *     to the buffer into which the bios information is to be returned.
 *     This buffer must be at least as big as biosInfoListSize multiplied
 *     by the size of the NV2080_CTRL_BIOS_INFO structure.
 *
 * Possible status values returned are:
 *   NV_OK
 *   NV_ERR_INVALID_PARAM_STRUCT
 *   NV_ERR_INVALID_ARGUMENT
 *   NV_ERR_OPERATING_SYSTEM
 */
#define NV2080_CTRL_CMD_BIOS_GET_INFO (0x20800802) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BIOS_INTERFACE_ID << 8) | 0x2" */

typedef struct NV2080_CTRL_BIOS_GET_INFO_PARAMS {
    NvU32 biosInfoListSize;
    NV_DECLARE_ALIGNED(NvP64 biosInfoList, 8);
} NV2080_CTRL_BIOS_GET_INFO_PARAMS;

/*
 * NV2080_CTRL_CMD_BIOS_GET_INFO_V2
 *
 * This command returns bios information for the associated GPU.
 * Requests to retrieve bios information use a list of one or more
 * NV2080_CTRL_BIOS_INFO structures.
 *
 *   biosInfoListSize
 *     This field specifies the number of entries on the caller's
 *     biosInfoList.
 *   biosInfoList
 *     Bios information to be returned.
 *
 * Possible status values returned are:
 *   NV_OK
 *   NV_ERR_INVALID_PARAM_STRUCT
 *   NV_ERR_INVALID_ARGUMENT
 *   NV_ERR_OPERATING_SYSTEM
 */
#define NV2080_CTRL_CMD_BIOS_GET_INFO_V2 (0x20800810) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BIOS_INTERFACE_ID << 8) | NV2080_CTRL_BIOS_GET_INFO_V2_PARAMS_MESSAGE_ID" */

#define NV2080_CTRL_BIOS_GET_INFO_V2_PARAMS_MESSAGE_ID (0x10U)

typedef struct NV2080_CTRL_BIOS_GET_INFO_V2_PARAMS {
    NvU32                 biosInfoListSize;
    NV2080_CTRL_BIOS_INFO biosInfoList[NV2080_CTRL_BIOS_INFO_MAX_SIZE];
} NV2080_CTRL_BIOS_GET_INFO_V2_PARAMS;
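
/*
 * Editor's sketch (not part of the original header): querying the BIOS revision
 * and OEM revision through the V2 control. rmControl() is a hypothetical control
 * entry point (standing in for e.g. NvRmControl), not part of this header.
 */
extern NV_STATUS rmControl(NvHandle hClient, NvHandle hObject, NvU32 cmd,
                           void *pParams, NvU32 paramsSize);  // assumed wrapper

static NV_STATUS getBiosRevisionsExample(NvHandle hClient, NvHandle hSubDevice,
                                         NvU32 *pRevision, NvU32 *pOemRevision)
{
    NV_STATUS status;
    NV2080_CTRL_BIOS_GET_INFO_V2_PARAMS params = {0};

    params.biosInfoListSize      = 2;
    params.biosInfoList[0].index = NV2080_CTRL_BIOS_INFO_INDEX_REVISION;
    params.biosInfoList[1].index = NV2080_CTRL_BIOS_INFO_INDEX_OEM_REVISION;

    status = rmControl(hClient, hSubDevice, NV2080_CTRL_CMD_BIOS_GET_INFO_V2,
                       &params, sizeof(params));
    if (status == NV_OK)
    {
        *pRevision    = params.biosInfoList[0].data;
        *pOemRevision = params.biosInfoList[1].data;
    }
    return status;
}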

/*
 * NV2080_CTRL_CMD_BIOS_GET_SKU_INFO
 *
 * This command returns information about the current board SKU.
 * NV_ERR_INVALID_OWNER will be returned if the call
 * isn't made with the OS as the administrator.
 *
 *   chipSKU
 *     This field returns the SKU for the current chip.
 *   chipSKUMod
 *     This field returns the SKU modifier.
 *   project
 *     This field returns the Project (Board) number.
 *   projectSKU
 *     This field returns the Project (Board) SKU number.
 *   CDP
 *     This field returns the Collaborative Design Project Number.
 *   projectSKUMod
 *     This field returns the Project (Board) SKU Modifier.
 *   businessCycle
 *     This field returns the business cycle the board is associated with.
 *
 * Possible status values returned are:
 *   NV_OK
 *   NV_ERR_INVALID_OWNER
 */
#define NV2080_CTRL_CMD_BIOS_GET_SKU_INFO (0x20800808) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BIOS_INTERFACE_ID << 8) | NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS_MESSAGE_ID" */

/* maximum length of parameter strings */

#define NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS_MESSAGE_ID (0x8U)

typedef struct NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS {
    NvU32 BoardID;
    char  chipSKU[4];
    char  chipSKUMod[2];
    char  project[5];
    char  projectSKU[5];
    char  CDP[6];
    char  projectSKUMod[2];
    NvU32 businessCycle;
} NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS;
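
/*
 * Editor's sketch (not part of the original header): the SKU string fields are
 * fixed-size character arrays that may not be NUL-terminated, so bounded
 * printing is used. Assumes <stdio.h>.
 */
static void printSkuInfoExample(const NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS *p)
{
    printf("board 0x%x: project %.5s / SKU %.5s%.2s, chip SKU %.4s%.2s, CDP %.6s\n",
           p->BoardID,
           p->project, p->projectSKU, p->projectSKUMod,
           p->chipSKU, p->chipSKUMod, p->CDP);
}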

/*
 * NV2080_CTRL_CMD_BIOS_GET_POST_TIME
 *
 * This command is used to get the GPU POST time (in milliseconds).
 * If the associated GPU is the master GPU this value will be recorded
 * by the VBIOS and retrieved from the KDA buffer. If the associated
 * GPU is a secondary GPU then this value will reflect the devinit
 * processing time.
 *
 *   vbiosPostTime
 *     This parameter returns the vbios post time in msec.
 *
 * Possible return status values are:
 *   NV_OK
 *   NV_ERR_NOT_SUPPORTED
 *   NV_ERR_INVALID_ARGUMENT
 */
#define NV2080_CTRL_CMD_BIOS_GET_POST_TIME (0x20800809) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BIOS_INTERFACE_ID << 8) | NV2080_CTRL_CMD_BIOS_GET_POST_TIME_PARAMS_MESSAGE_ID" */

#define NV2080_CTRL_CMD_BIOS_GET_POST_TIME_PARAMS_MESSAGE_ID (0x9U)

typedef struct NV2080_CTRL_CMD_BIOS_GET_POST_TIME_PARAMS {
    NV_DECLARE_ALIGNED(NvU64 vbiosPostTime, 8);
} NV2080_CTRL_CMD_BIOS_GET_POST_TIME_PARAMS;

/*
 * NV2080_CTRL_CMD_BIOS_GET_UEFI_SUPPORT
 *
 * This command returns the UEFI version, UEFI image presence, and the
 * Graphics Firmware Mode, i.e. whether the system is running in UEFI or not.
 *
 *   version
 *     This parameter returns the UEFI version.
 *
 *   flags
 *     This parameter indicates UEFI image presence and Graphics Firmware mode.
 *     NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_PRESENCE
 *       This field returns the UEFI presence value. Legal values for this
 *       field include:
 *         NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_PRESENCE_NO
 *           This value indicates that the UEFI image is not present.
 *         NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_PRESENCE_YES
 *           This value indicates that the UEFI image is present.
 *         NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_PRESENCE_PLACEHOLDER
 *           This value indicates that there is a dummy UEFI placeholder,
 *           which can later be updated with a valid UEFI image.
 *         NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_PRESENCE_HIDDEN
 *           This value indicates that the UEFI image is hidden.
 *     NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_RUNNING
 *       This field indicates the UEFI running value. Legal values for
 *       this parameter include:
 *         NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_RUNNING_FALSE
 *           This value indicates that UEFI is not running.
 *         NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_RUNNING_TRUE
 *           This value indicates that UEFI is running.
 *
 * Possible status values returned are:
 *   NV_OK
 *   NV_ERR_NOT_READY
 *   NV_ERR_INVALID_STATE
 */

#define NV2080_CTRL_CMD_BIOS_GET_UEFI_SUPPORT (0x2080080b) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_BIOS_INTERFACE_ID << 8) | NV2080_CTRL_BIOS_GET_UEFI_SUPPORT_PARAMS_MESSAGE_ID" */

#define NV2080_CTRL_BIOS_GET_UEFI_SUPPORT_PARAMS_MESSAGE_ID (0xBU)

typedef struct NV2080_CTRL_BIOS_GET_UEFI_SUPPORT_PARAMS {
    NvU32 version;
    NvU32 flags;
} NV2080_CTRL_BIOS_GET_UEFI_SUPPORT_PARAMS;

/* Legal values for flags parameter */
#define NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_PRESENCE             1:0
#define NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_PRESENCE_NO          (0x00000000)
#define NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_PRESENCE_YES         (0x00000001)
#define NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_PRESENCE_PLACEHOLDER (0x00000002)
#define NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_PRESENCE_HIDDEN      (0x00000003)
#define NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_RUNNING              2:2
#define NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_RUNNING_FALSE        (0x00000000)
#define NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_RUNNING_TRUE         (0x00000001)
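
/*
 * Editor's sketch (not part of the original header): decoding the flags field.
 * PRESENCE occupies bits 1:0 and RUNNING occupies bit 2, so plain shifts and
 * masks are used here; driver code would normally go through the DRF
 * field-access macros instead.
 */
static void decodeUefiFlagsExample(const NV2080_CTRL_BIOS_GET_UEFI_SUPPORT_PARAMS *p,
                                   NvU32 *pPresence, NvBool *pRunning)
{
    *pPresence = p->flags & 0x3;          // _PRESENCE_NO/_YES/_PLACEHOLDER/_HIDDEN
    *pRunning  = (((p->flags >> 2) & 0x1) ==
                  NV2080_CTRL_BIOS_UEFI_SUPPORT_FLAGS_RUNNING_TRUE);
}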

/* _ctrl2080bios_h_ */
@@ -1,63 +0,0 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2017-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#pragma once

#include <nvtypes.h>

//
// This file was generated with FINN, an NVIDIA coding tool.
// Source file: ctrl/ctrl2080/ctrl2080ecc.finn
//

#include "ctrl/ctrl2080/ctrl2080base.h"

#define NV2080_CTRL_CMD_ECC_GET_CLIENT_EXPOSED_COUNTERS (0x20803400U) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_ECC_INTERFACE_ID << 8) | NV2080_CTRL_ECC_GET_CLIENT_EXPOSED_COUNTERS_PARAMS_MESSAGE_ID" */

/*
 * NV2080_CTRL_ECC_GET_CLIENT_EXPOSED_COUNTERS_PARAMS
 *
 *   sramLastClearedTimestamp [out]
 *   dramLastClearedTimestamp [out]
 *     Unix-epoch-based timestamps. These fields indicate when the error
 *     counters were last cleared by the user.
 *
 *   sramErrorCounts [out]
 *   dramErrorCounts [out]
 *     Aggregate error counts for SRAM and DRAM.
 */

#define NV2080_CTRL_ECC_GET_CLIENT_EXPOSED_COUNTERS_PARAMS_MESSAGE_ID (0x0U)

typedef struct NV2080_CTRL_ECC_GET_CLIENT_EXPOSED_COUNTERS_PARAMS {
    NvU32 sramLastClearedTimestamp;
    NvU32 dramLastClearedTimestamp;

    NV_DECLARE_ALIGNED(NvU64 sramCorrectedTotalCounts, 8);
    NV_DECLARE_ALIGNED(NvU64 sramUncorrectedTotalCounts, 8);
    NV_DECLARE_ALIGNED(NvU64 dramCorrectedTotalCounts, 8);
    NV_DECLARE_ALIGNED(NvU64 dramUncorrectedTotalCounts, 8);
} NV2080_CTRL_ECC_GET_CLIENT_EXPOSED_COUNTERS_PARAMS;
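
/*
 * Editor's sketch (not part of the original header): reporting the aggregate
 * counts after a successful NV2080_CTRL_CMD_ECC_GET_CLIENT_EXPOSED_COUNTERS
 * call. Assumes <stdio.h> and that NvU64 fits in unsigned long long.
 */
static void printEccCountersExample(const NV2080_CTRL_ECC_GET_CLIENT_EXPOSED_COUNTERS_PARAMS *p)
{
    printf("SRAM (cleared @%u): corrected=%llu uncorrected=%llu\n",
           p->sramLastClearedTimestamp,
           (unsigned long long)p->sramCorrectedTotalCounts,
           (unsigned long long)p->sramUncorrectedTotalCounts);
    printf("DRAM (cleared @%u): corrected=%llu uncorrected=%llu\n",
           p->dramLastClearedTimestamp,
           (unsigned long long)p->dramCorrectedTotalCounts,
           (unsigned long long)p->dramUncorrectedTotalCounts);
}
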
/* _ctrl2080ecc_h_ */
@@ -1,82 +0,0 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#pragma once

#include <nvtypes.h>

//
// This file was generated with FINN, an NVIDIA coding tool.
// Source file: ctrl/ctrl2080/ctrl2080gsp.finn
//

#include "ctrl/ctrl2080/ctrl2080base.h"

/* NV20_SUBDEVICE_XX GSP control commands and parameters */

/*
 * NV2080_CTRL_CMD_GSP_GET_FEATURES
 *
 * This command is used to determine which GSP features are
 * supported on this GPU.
 *
 *   gspFeatures
 *     Bit mask that specifies the GSP features supported.
 *   bValid
 *     If this field is set to NV_TRUE, the above bit mask is considered
 *     valid; otherwise, the bit mask should be ignored as invalid. bValid
 *     will be set to NV_TRUE when RM is a GSP client with GPU support
 *     offloaded to GSP firmware.
 *   bDefaultGspRmGpu
 *     If this field is set to NV_TRUE, it indicates that the
 *     underlying GPU has GSP-RM enabled by default. If set to NV_FALSE,
 *     it indicates that the GPU has GSP-RM disabled by default.
 *   firmwareVersion
 *     This field contains the buffer into which the firmware build version
 *     should be returned, if the GPU is offloaded. Otherwise, the buffer
 *     will remain untouched.
 *
 * Possible status return values are:
 *   NV_OK
 *   NV_ERR_NOT_SUPPORTED
 *   NV_ERR_INVALID_ARGUMENT
 */
#define NV2080_CTRL_CMD_GSP_GET_FEATURES (0x20803601) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GSP_INTERFACE_ID << 8) | NV2080_CTRL_GSP_GET_FEATURES_PARAMS_MESSAGE_ID" */

#define NV2080_GSP_MAX_BUILD_VERSION_LENGTH (0x0000040)

#define NV2080_CTRL_GSP_GET_FEATURES_PARAMS_MESSAGE_ID (0x1U)

typedef struct NV2080_CTRL_GSP_GET_FEATURES_PARAMS {
    NvU32  gspFeatures;
    NvBool bValid;
    NvBool bDefaultGspRmGpu;
    NvU8   firmwareVersion[NV2080_GSP_MAX_BUILD_VERSION_LENGTH];
} NV2080_CTRL_GSP_GET_FEATURES_PARAMS;

/* Valid feature values */
#define NV2080_CTRL_GSP_GET_FEATURES_UVM_ENABLED       0:0
#define NV2080_CTRL_GSP_GET_FEATURES_UVM_ENABLED_FALSE (0x00000000)
#define NV2080_CTRL_GSP_GET_FEATURES_UVM_ENABLED_TRUE  (0x00000001)
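
/*
 * Editor's sketch (not part of the original header): checking the UVM_ENABLED
 * feature bit. gspFeatures is only meaningful when bValid is NV_TRUE, i.e.
 * when RM runs as a GSP client.
 */
static NvBool isUvmEnabledExample(const NV2080_CTRL_GSP_GET_FEATURES_PARAMS *p)
{
    if (!p->bValid)
        return NV_FALSE;   // feature mask is not valid on this configuration

    // UVM_ENABLED is field 0:0 of gspFeatures.
    return ((p->gspFeatures & 0x1) == NV2080_CTRL_GSP_GET_FEATURES_UVM_ENABLED_TRUE);
}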

// _ctrl2080gsp_h_

@@ -1,32 +0,0 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2014-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#pragma once

#include <nvtypes.h>

//
// This file was generated with FINN, an NVIDIA coding tool.
// Source file: ctrl/ctrl2080/ctrl2080nvlink.finn
//

@@ -1,32 +0,0 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2011-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#pragma once

#include <nvtypes.h>

//
// This file was generated with FINN, an NVIDIA coding tool.
// Source file: ctrl/ctrl2080/ctrl2080power.finn
//

@@ -1,143 +0,0 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2010-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#pragma once

#include <nvtypes.h>

//
// This file was generated with FINN, an NVIDIA coding tool.
// Source file: ctrl/ctrl5070/ctrl5070event.finn
//

#include "ctrl/ctrl5070/ctrl5070base.h"

/* NV50_DISPLAY event-related control commands and parameters */

/*
 * NV5070_CTRL_CMD_EVENT_SET_NOTIFICATION
 *
 * This command sets event notification state for the NV50_DISPLAY object.
 * This command requires that an instance of NV01_EVENT has been previously
 * bound to the NV50_DISPLAY object.
 *
 *   subDeviceInstance
 *     This parameter specifies the subdevice instance within the NV50_DISPLAY
 *     parent device to which the operation should be directed. This parameter
 *     must specify a value between zero and the total number of subdevices
 *     within the parent device. This parameter should be set to zero for
 *     default behavior.
 *   hEvent
 *     This parameter specifies the handle of the NV01_EVENT instance
 *     to be bound to the given subDeviceInstance.
 *   event
 *     This parameter specifies the type of event to which the specified
 *     action is to be applied. This parameter must specify a valid
 *     NOTIFIERS value of the display class.
 *   action
 *     This parameter specifies the desired event notification action.
 *     Valid notification actions include:
 *       NV5070_CTRL_SET_EVENT_NOTIFICATION_DISABLE
 *         This action disables event notification for the specified
 *         event for the associated subdevice object.
 *       NV5070_CTRL_SET_EVENT_NOTIFICATION_SINGLE
 *         This action enables single-shot event notification for the
 *         specified event for the associated subdevice object.
 *       NV5070_CTRL_SET_EVENT_NOTIFICATION_REPEAT
 *         This action enables repeated event notification for the specified
 *         event for the associated subdevice object.
 *
 * Possible status values returned are:
 *   NV_OK
 *   NV_ERR_INVALID_PARAM_STRUCT
 *   NV_ERR_INVALID_ARGUMENT
 *   NV_ERR_INVALID_STATE
 */
#define NV5070_CTRL_CMD_EVENT_SET_NOTIFICATION (0x50700901) /* finn: Evaluated from "(FINN_NV50_DISPLAY_EVENT_INTERFACE_ID << 8) | NV5070_CTRL_EVENT_SET_NOTIFICATION_PARAMS_MESSAGE_ID" */

#define NV5070_CTRL_EVENT_SET_NOTIFICATION_PARAMS_MESSAGE_ID (0x1U)

typedef struct NV5070_CTRL_EVENT_SET_NOTIFICATION_PARAMS {
    NvU32    subDeviceInstance;
    NvHandle hEvent;
    NvU32    event;
    NvU32    action;
} NV5070_CTRL_EVENT_SET_NOTIFICATION_PARAMS;

/* valid action values */
#define NV5070_CTRL_EVENT_SET_NOTIFICATION_ACTION_DISABLE (0x00000000)
#define NV5070_CTRL_EVENT_SET_NOTIFICATION_ACTION_SINGLE  (0x00000001)
#define NV5070_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT  (0x00000002)
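
/*
 * Editor's sketch (not part of the original header): requesting repeated
 * notification for a display event. rmControl() is a hypothetical control
 * entry point (standing in for e.g. NvRmControl), and eventValue stands for a
 * NOTIFIERS value from the display class headers.
 */
extern NV_STATUS rmControl(NvHandle hClient, NvHandle hObject, NvU32 cmd,
                           void *pParams, NvU32 paramsSize);  // assumed wrapper

static NV_STATUS enableDisplayEventExample(NvHandle hClient, NvHandle hDisplay,
                                           NvHandle hEvent, NvU32 eventValue)
{
    NV5070_CTRL_EVENT_SET_NOTIFICATION_PARAMS params = {0};

    params.subDeviceInstance = 0;  // default subdevice
    params.hEvent            = hEvent;
    params.event             = eventValue;
    params.action            = NV5070_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT;

    return rmControl(hClient, hDisplay, NV5070_CTRL_CMD_EVENT_SET_NOTIFICATION,
                     &params, sizeof(params));
}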

/*
 * NV5070_CTRL_CMD_EVENT_SET_TRIGGER
 *
 * This command triggers a software event for the NV50_DISPLAY object.
 * This command accepts no parameters.
 *
 * Possible status values returned are:
 *   NV_OK
 */
#define NV5070_CTRL_CMD_EVENT_SET_TRIGGER (0x50700902) /* finn: Evaluated from "(FINN_NV50_DISPLAY_EVENT_INTERFACE_ID << 8) | 0x2" */

/*
 * NV5070_CTRL_CMD_EVENT_SET_MEMORY_NOTIFIES
 *
 *   subDeviceInstance
 *     This parameter specifies the subdevice instance within the NV50_DISPLAY
 *     parent device to which the operation should be directed.
 *   hMemory
 *     This parameter specifies the handle of the memory object
 *     that identifies the memory address translation for this
 *     subdevice instance's notification(s). The beginning of the
 *     translation points to an array of notification data structures.
 *     The size of the translation must be at least large enough to hold the
 *     maximum number of notification data structures.
 *     Legal argument values must be instances of the following classes:
 *       NV01_NULL
 *       NV04_MEMORY
 *     When hMemory specifies the NV01_NULL_OBJECT value then any existing
 *     memory translation connection is cleared. There must not be any
 *     pending notifications when this command is issued.
 *
 * Possible status values returned are:
 *   NV_OK
 *   NV_ERR_INVALID_PARAM_STRUCT
 *   NV_ERR_INVALID_ARGUMENT
 *   NV_ERR_INVALID_STATE
 */
#define NV5070_CTRL_CMD_EVENT_SET_MEMORY_NOTIFIES (0x50700903) /* finn: Evaluated from "(FINN_NV50_DISPLAY_EVENT_INTERFACE_ID << 8) | NV5070_CTRL_EVENT_SET_MEMORY_NOTIFIES_PARAMS_MESSAGE_ID" */

#define NV5070_CTRL_EVENT_SET_MEMORY_NOTIFIES_PARAMS_MESSAGE_ID (0x3U)

typedef struct NV5070_CTRL_EVENT_SET_MEMORY_NOTIFIES_PARAMS {
    NvU32    subDeviceInstance;
    NvHandle hMemory;
} NV5070_CTRL_EVENT_SET_MEMORY_NOTIFIES_PARAMS;

#define NV5070_EVENT_MEMORY_NOTIFIES_STATUS_NOTIFIED 0
#define NV5070_EVENT_MEMORY_NOTIFIES_STATUS_PENDING  1
#define NV5070_EVENT_MEMORY_NOTIFIES_STATUS_ERROR    2

/* _ctrl5070event_h_ */
@@ -1,521 +0,0 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2001-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#pragma once

#include <nvtypes.h>

//
// This file was generated with FINN, an NVIDIA coding tool.
// Source file: ctrl/ctrl5070/ctrl5070seq.finn
//

#include "ctrl/ctrl5070/ctrl5070base.h"

/*
 * NV5070_CTRL_CMD_GET_SOR_SEQ_CTL
 *
 * This command returns the SOR sequencer's power up and down PCs and the
 * sequencer program to be used for power up and down.
 *
 *   orNumber
 *     The OR number for which the seq ctrls are to be queried.
 *
 *   puPcAlt
 *     Alternate power up PC.
 *
 *   pdPc
 *     Power down PC.
 *
 *   pdPcAlt
 *     Alternate power down PC.
 *
 *   normalStart
 *     Whether normal mode is using the normal or alt PC.
 *
 *   safeStart
 *     Whether safe mode is using the normal or alt PC.
 *
 *   normalState
 *     Whether normal state is PD or PU.
 *
 *   safeState
 *     Whether safe state is PD or PU.
 *
 *   flags
 *     There is only one flag defined currently:
 *     1. GET_SEQ_PROG: Whether or not the current seq program must be
 *        returned. The caller should set this to _YES to read the
 *        current seq program.
 *
 *   seqProgram
 *     The sequencer program consisting of power up and down sequences.
 *     For NV50, this consists of 16 DWORDS. The program is
 *     relevant only when the GET_SEQ_PROG flag is set to _YES.
 *
 * Possible status values returned are:
 *   NV_OK
 *   NV_ERR_INVALID_ARGUMENT
 *   NV_ERR_GENERIC
 */
#define NV5070_CTRL_CMD_GET_SOR_SEQ_CTL (0x50700301U) /* finn: Evaluated from "(FINN_NV50_DISPLAY_SEQ_INTERFACE_ID << 8) | NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_PARAMS_MESSAGE_ID" */

#define NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_PU_PC_ALT_VALUE          3:0

#define NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_PD_PC_VALUE              3:0

#define NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_PD_PC_ALT_VALUE          3:0

#define NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_NORMAL_START_VAL         0:0
#define NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_NORMAL_START_VAL_NORMAL  (0x00000000U)
#define NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_NORMAL_START_VAL_ALT     (0x00000001U)

#define NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_SAFE_START_VAL           0:0
#define NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_SAFE_START_VAL_NORMAL    (0x00000000U)
#define NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_SAFE_START_VAL_ALT       (0x00000001U)

#define NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_NORMAL_STATE_VAL         0:0
#define NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_NORMAL_STATE_VAL_PD      (0x00000000U)
#define NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_NORMAL_STATE_VAL_PU      (0x00000001U)

#define NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_SAFE_STATE_VAL           0:0
#define NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_SAFE_STATE_VAL_PD        (0x00000000U)
#define NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_SAFE_STATE_VAL_PU        (0x00000001U)

#define NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_FLAGS_GET_SEQ_PROG       0:0
#define NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_FLAGS_GET_SEQ_PROG_NO    (0x00000000U)
#define NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_FLAGS_GET_SEQ_PROG_YES   (0x00000001U)

#define NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_SEQ_PROG_SIZE            16U
#define NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_PARAMS_MESSAGE_ID (0x1U)

typedef struct NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_PARAMS {
    NV5070_CTRL_CMD_BASE_PARAMS base;
    NvU32                       orNumber;

    NvU32                       puPcAlt;
    NvU32                       pdPc;
    NvU32                       pdPcAlt;
    NvU32                       normalStart;
    NvU32                       safeStart;
    NvU32                       normalState;
    NvU32                       safeState;
    NvU32                       flags;
    NvU32                       seqProgram[NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_SEQ_PROG_SIZE];
} NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_PARAMS;
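
/*
 * Editor's sketch (not part of the original header): reading back the current
 * SOR sequencer program. GET_SEQ_PROG must be set to _YES for seqProgram to be
 * filled in; rmControl() is the same hypothetical control entry point assumed
 * in the earlier sketches.
 */
extern NV_STATUS rmControl(NvHandle hClient, NvHandle hObject, NvU32 cmd,
                           void *pParams, NvU32 paramsSize);  // assumed wrapper

static NV_STATUS readSorSeqProgramExample(NvHandle hClient, NvHandle hDisplay,
                                          NvU32 orNumber,
                                          NvU32 *pSeqProgramOut /* 16 DWORDs */)
{
    NV_STATUS status;
    NvU32     i;
    NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_PARAMS params = {0};

    params.orNumber = orNumber;
    // FLAGS_GET_SEQ_PROG is field 0:0 of flags.
    params.flags    = NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_FLAGS_GET_SEQ_PROG_YES;

    status = rmControl(hClient, hDisplay, NV5070_CTRL_CMD_GET_SOR_SEQ_CTL,
                       &params, sizeof(params));
    if (status == NV_OK)
    {
        for (i = 0; i < NV5070_CTRL_CMD_GET_SOR_SEQ_CTL_SEQ_PROG_SIZE; i++)
            pSeqProgramOut[i] = params.seqProgram[i];
    }
    return status;
}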

/*
 * NV5070_CTRL_CMD_SET_SOR_SEQ_CTL
 *
 * This command does the following, in order:
 * (a) Loads a specified sequencer program for power up and down.
 * (b) Updates the SOR sequencer's power up and down PCs, tells the seq to SKIP
 *     the current wait for vsync and waits until the sequencer actually SKIPs
 *     or halts (see more below under the SKIP_WAIT_FOR_VSYNC flag).
 * (c) Updates power settings (safe/normal start and state).
 *
 *   orNumber
 *     The OR number for which the seq ctrls are to be modified.
 *
 *   puPcAlt
 *     Alternate power up PC.
 *
 *   pdPc
 *     Power down PC.
 *
 *   pdPcAlt
 *     Alternate power down PC.
 *
 *   normalStart
 *     Whether normal mode should use the normal or alt PC.
 *
 *   safeStart
 *     Whether safe mode should use the normal or alt PC.
 *
 *   normalState
 *     Whether normal state should be PD or PU.
 *
 *   safeState
 *     Whether safe state should be PD or PU.
 *
 *   flags
 *     The following flags have been defined:
 *     1. SKIP_WAIT_FOR_VSYNC: Whether the seq should be forced to skip waiting
 *        for vsync if it's currently waiting on such an instruction.
 *        If the current instruction doesn't have a wait for vsync,
 *        SKIP will be applied to the next one and so on until
 *        either the sequencer halts or an instruction with a wait for
 *        vsync is found. The call will block until the seq halts or
 *        SKIPs a wait for vsync.
 *     2. SEQ_PROG_PRESENT: Whether or not a new seq program has
 *        been specified.
 *
 *   seqProgram
 *     The sequencer program consisting of power up and down sequences.
 *     For NV50, this consists of 16 DWORDS. The program is
 *     relevant only when the SEQ_PROG_PRESENT flag is set to _YES.
 *
 * Possible status values returned are:
 *   NV_OK
 *   NV_ERR_INVALID_ARGUMENT
 *   NV_ERR_GENERIC
 */
#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL (0x50700302U) /* finn: Evaluated from "(FINN_NV50_DISPLAY_SEQ_INTERFACE_ID << 8) | NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_PARAMS_MESSAGE_ID" */

#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_PU_PC_ALT_VALUE                3:0
#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_PU_PC_ALT_SPECIFIED            31:31
#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_PU_PC_ALT_SPECIFIED_NO         (0x00000000U)
#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_PU_PC_ALT_SPECIFIED_YES        (0x00000001U)

#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_PD_PC_VALUE                    3:0
#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_PD_PC_SPECIFIED                31:31
#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_PD_PC_SPECIFIED_NO             (0x00000000U)
#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_PD_PC_SPECIFIED_YES            (0x00000001U)

#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_PD_PC_ALT_VALUE                3:0
#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_PD_PC_ALT_SPECIFIED            31:31
#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_PD_PC_ALT_SPECIFIED_NO         (0x00000000U)
#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_PD_PC_ALT_SPECIFIED_YES        (0x00000001U)

#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_NORMAL_START_VAL               0:0
#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_NORMAL_START_VAL_NORMAL        (0x00000000U)
#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_NORMAL_START_VAL_ALT           (0x00000001U)
#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_NORMAL_START_SPECIFIED         31:31
#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_NORMAL_START_SPECIFIED_NO      (0x00000000U)
#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_NORMAL_START_SPECIFIED_YES     (0x00000001U)

#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_SAFE_START_VAL                 0:0
#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_SAFE_START_VAL_NORMAL          (0x00000000U)
#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_SAFE_START_VAL_ALT             (0x00000001U)
#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_SAFE_START_SPECIFIED           31:31
#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_SAFE_START_SPECIFIED_NO        (0x00000000U)
#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_SAFE_START_SPECIFIED_YES       (0x00000001U)

#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_NORMAL_STATE_VAL               0:0
#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_NORMAL_STATE_VAL_PD            (0x00000000U)
#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_NORMAL_STATE_VAL_PU            (0x00000001U)
#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_NORMAL_STATE_SPECIFIED         31:31
#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_NORMAL_STATE_SPECIFIED_NO      (0x00000000U)
#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_NORMAL_STATE_SPECIFIED_YES     (0x00000001U)

#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_SAFE_STATE_VAL                 0:0
#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_SAFE_STATE_VAL_PD              (0x00000000U)
#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_SAFE_STATE_VAL_PU              (0x00000001U)
#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_SAFE_STATE_SPECIFIED           31:31
#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_SAFE_STATE_SPECIFIED_NO        (0x00000000U)
#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_SAFE_STATE_SPECIFIED_YES       (0x00000001U)

#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_FLAGS_SKIP_WAIT_FOR_VSYNC      0:0
#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_FLAGS_SKIP_WAIT_FOR_VSYNC_NO   (0x00000000U)
#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_FLAGS_SKIP_WAIT_FOR_VSYNC_YES  (0x00000001U)

#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_FLAGS_SEQ_PROG_PRESENT         1:1
#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_FLAGS_SEQ_PROG_PRESENT_NO      (0x00000000U)
#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_FLAGS_SEQ_PROG_PRESENT_YES     (0x00000001U)

#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_SEQ_PROG_SIZE                  16U
#define NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_PARAMS_MESSAGE_ID (0x2U)

typedef struct NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_PARAMS {
    NV5070_CTRL_CMD_BASE_PARAMS base;
    NvU32                       orNumber;

    NvU32                       puPcAlt;
    NvU32                       pdPc;
    NvU32                       pdPcAlt;
    NvU32                       normalStart;
    NvU32                       safeStart;
    NvU32                       normalState;
    NvU32                       safeState;
    NvU32                       flags;
    NvU32                       seqProgram[NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_SEQ_PROG_SIZE];
} NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_PARAMS;
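
/*
 * Editor's sketch (not part of the original header): per the define pairs
 * above, each SET field appears to pair a value (bits 3:0 or 0:0) with a
 * _SPECIFIED flag in bit 31, and only fields with the _SPECIFIED bit set are
 * applied. Here only pdPc is updated; everything else is left unspecified and
 * therefore untouched. Driver code would normally use the DRF macros for this.
 */
static void specifyPdPcExample(NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_PARAMS *pParams,
                               NvU32 newPdPc)
{
    // Value in bits 3:0, _SPECIFIED_YES in bit 31.
    pParams->pdPc = (newPdPc & 0xF) |
                    ((NvU32)NV5070_CTRL_CMD_SET_SOR_SEQ_CTL_PD_PC_SPECIFIED_YES << 31);
}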

/*
 * NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL
 *
 * This command returns the PIOR sequencer's power up and down PCs and the
 * sequencer program to be used for power up and down.
 *
 *   orNumber
 *     The OR number for which the seq ctrls are to be queried.
 *
 *   puPcAlt
 *     Alternate power up PC.
 *
 *   pdPc
 *     Power down PC.
 *
 *   pdPcAlt
 *     Alternate power down PC.
 *
 *   normalStart
 *     Whether normal mode is using the normal or alt PC.
 *
 *   safeStart
 *     Whether safe mode is using the normal or alt PC.
 *
 *   normalState
 *     Whether normal state is PD or PU.
 *
 *   safeState
 *     Whether safe state is PD or PU.
 *
 *   flags
 *     There is only one flag defined currently:
 *     1. GET_SEQ_PROG: Whether or not the current seq program must be
 *        returned. The caller should set this to _YES to read the
 *        current seq program.
 *
 *   seqProgram
 *     The sequencer program consisting of power up and down sequences.
 *     For NV50, this consists of 8 DWORDS. The program is
 *     relevant only when the GET_SEQ_PROG flag is set to _YES.
 *
 * Possible status values returned are:
 *   NV_OK
 *   NV_ERR_INVALID_ARGUMENT
 *   NV_ERR_GENERIC
 */
#define NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL (0x50700303U) /* finn: Evaluated from "(FINN_NV50_DISPLAY_SEQ_INTERFACE_ID << 8) | NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_PARAMS_MESSAGE_ID" */

#define NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_PU_PC_ALT_VALUE          3:0

#define NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_PD_PC_VALUE              3:0

#define NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_PD_PC_ALT_VALUE          3:0

#define NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_NORMAL_START_VAL         0:0
#define NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_NORMAL_START_VAL_NORMAL  (0x00000000U)
#define NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_NORMAL_START_VAL_ALT     (0x00000001U)

#define NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_SAFE_START_VAL           0:0
#define NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_SAFE_START_VAL_NORMAL    (0x00000000U)
#define NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_SAFE_START_VAL_ALT       (0x00000001U)

#define NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_NORMAL_STATE_VAL         0:0
#define NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_NORMAL_STATE_VAL_PD      (0x00000000U)
#define NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_NORMAL_STATE_VAL_PU      (0x00000001U)

#define NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_SAFE_STATE_VAL           0:0
#define NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_SAFE_STATE_VAL_PD        (0x00000000U)
#define NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_SAFE_STATE_VAL_PU        (0x00000001U)

#define NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_FLAGS_GET_SEQ_PROG       0:0
#define NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_FLAGS_GET_SEQ_PROG_NO    (0x00000000U)
#define NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_FLAGS_GET_SEQ_PROG_YES   (0x00000001U)

#define NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_SEQ_PROG_SIZE            8U
#define NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_PARAMS_MESSAGE_ID (0x3U)

typedef struct NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_PARAMS {
    NV5070_CTRL_CMD_BASE_PARAMS base;
    NvU32                       orNumber;

    NvU32                       puPcAlt;
    NvU32                       pdPc;
    NvU32                       pdPcAlt;
    NvU32                       normalStart;
    NvU32                       safeStart;
    NvU32                       normalState;
    NvU32                       safeState;
    NvU32                       flags;
    NvU32                       seqProgram[NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_SEQ_PROG_SIZE];
} NV5070_CTRL_CMD_GET_PIOR_SEQ_CTL_PARAMS;

/*
 * NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL
 *
 * This command does the following, in order:
 * (a) Loads a specified sequencer program for power up and down.
 * (b) Updates the PIOR sequencer's power up and down PCs, tells the seq to SKIP
 *     the current wait for vsync and waits until the sequencer actually SKIPs
 *     or halts (see more below under the SKIP_WAIT_FOR_VSYNC flag).
 * (c) Updates power settings (safe/normal start and state).
 *
 *   orNumber
 *     The OR number for which the seq ctrls are to be modified.
 *
 *   puPcAlt
 *     Alternate power up PC.
 *
 *   pdPc
 *     Power down PC.
 *
 *   pdPcAlt
 *     Alternate power down PC.
 *
 *   normalStart
 *     Whether normal mode should use the normal or alt PC.
 *
 *   safeStart
 *     Whether safe mode should use the normal or alt PC.
 *
 *   normalState
 *     Whether normal state should be PD or PU.
 *
 *   safeState
 *     Whether safe state should be PD or PU.
 *
 *   flags
 *     The following flags have been defined:
 *     1. SKIP_WAIT_FOR_VSYNC: Whether the seq should be forced to skip waiting
 *        for vsync if it's currently waiting on such an instruction.
 *        If the current instruction doesn't have a wait for vsync,
 *        SKIP will be applied to the next one and so on until
 *        either the sequencer halts or an instruction with a wait for
 *        vsync is found. The call will block until the seq halts or
 *        SKIPs a wait for vsync.
 *     2. SEQ_PROG_PRESENT: Whether or not a new seq program has
 *        been specified.
 *
 *   seqProgram
 *     The sequencer program consisting of power up and down sequences.
 *     For NV50, this consists of 8 DWORDS. The program is
 *     relevant only when the SEQ_PROG_PRESENT flag is set to _YES.
 *
 * Possible status values returned are:
 *   NV_OK
 *   NV_ERR_INVALID_ARGUMENT
 *   NV_ERR_GENERIC
 */
#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL (0x50700304U) /* finn: Evaluated from "(FINN_NV50_DISPLAY_SEQ_INTERFACE_ID << 8) | NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_PARAMS_MESSAGE_ID" */

#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_PU_PC_ALT_VALUE                3:0
#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_PU_PC_ALT_SPECIFIED            31:31
#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_PU_PC_ALT_SPECIFIED_NO         (0x00000000U)
#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_PU_PC_ALT_SPECIFIED_YES        (0x00000001U)

#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_PD_PC_VALUE                    3:0
#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_PD_PC_SPECIFIED                31:31
#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_PD_PC_SPECIFIED_NO             (0x00000000U)
#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_PD_PC_SPECIFIED_YES            (0x00000001U)

#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_PD_PC_ALT_VALUE                3:0
#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_PD_PC_ALT_SPECIFIED            31:31
#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_PD_PC_ALT_SPECIFIED_NO         (0x00000000U)
#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_PD_PC_ALT_SPECIFIED_YES        (0x00000001U)

#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_NORMAL_START_VAL               0:0
#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_NORMAL_START_VAL_NORMAL        (0x00000000U)
#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_NORMAL_START_VAL_ALT           (0x00000001U)
#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_NORMAL_START_SPECIFIED         31:31
#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_NORMAL_START_SPECIFIED_NO      (0x00000000U)
#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_NORMAL_START_SPECIFIED_YES     (0x00000001U)

#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_SAFE_START_VAL                 0:0
#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_SAFE_START_VAL_NORMAL          (0x00000000U)
#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_SAFE_START_VAL_ALT             (0x00000001U)
#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_SAFE_START_SPECIFIED           31:31
#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_SAFE_START_SPECIFIED_NO        (0x00000000U)
#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_SAFE_START_SPECIFIED_YES       (0x00000001U)

#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_NORMAL_STATE_VAL               0:0
#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_NORMAL_STATE_VAL_PD            (0x00000000U)
#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_NORMAL_STATE_VAL_PU            (0x00000001U)
#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_NORMAL_STATE_SPECIFIED         31:31
#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_NORMAL_STATE_SPECIFIED_NO      (0x00000000U)
#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_NORMAL_STATE_SPECIFIED_YES     (0x00000001U)

#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_SAFE_STATE_VAL                 0:0
#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_SAFE_STATE_VAL_PD              (0x00000000U)
#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_SAFE_STATE_VAL_PU              (0x00000001U)
#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_SAFE_STATE_SPECIFIED           31:31
#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_SAFE_STATE_SPECIFIED_NO        (0x00000000U)
#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_SAFE_STATE_SPECIFIED_YES       (0x00000001U)

#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_FLAGS_SKIP_WAIT_FOR_VSYNC      0:0
#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_FLAGS_SKIP_WAIT_FOR_VSYNC_NO   (0x00000000U)
#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_FLAGS_SKIP_WAIT_FOR_VSYNC_YES  (0x00000001U)

#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_FLAGS_SEQ_PROG_PRESENT         1:1
#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_FLAGS_SEQ_PROG_PRESENT_NO      (0x00000000U)
#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_FLAGS_SEQ_PROG_PRESENT_YES     (0x00000001U)

#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_SEQ_PROG_SIZE                  8U
#define NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_PARAMS_MESSAGE_ID (0x4U)

typedef struct NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_PARAMS {
    NV5070_CTRL_CMD_BASE_PARAMS base;
    NvU32                       orNumber;

    NvU32                       puPcAlt;
    NvU32                       pdPc;
    NvU32                       pdPcAlt;
    NvU32                       normalStart;
    NvU32                       safeStart;
    NvU32                       normalState;
    NvU32                       safeState;
    NvU32                       flags;
    NvU32                       seqProgram[NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_SEQ_PROG_SIZE];
} NV5070_CTRL_CMD_SET_PIOR_SEQ_CTL_PARAMS;

/*
 * NV5070_CTRL_CMD_CTRL_SEQ_PROG_SPEED
 *
 * This call allows a fast sequencer program to be selected. It's intended for
 * situations where panel sequencing is not required and the usual sequencing
 * delays cost too much time.
 *
 *   displayId
 *     The corresponding display ID. (Note that this call is currently only
 *     supported for LVDS on an internal encoder, i.e. a SOR.)
 *   cmd
 *     The command to perform. Valid values are:
 *       NV5070_CTRL_SEQ_PROG_SPEED_CMD_GET
 *         Get the current state.
 *       NV5070_CTRL_SEQ_PROG_SPEED_CMD_SET
 *         Set the current state.
 *   state
 *     The state of panel sequencing for this displayId. This is an input
 *     when cmd = SET and an output when cmd = GET.
 *
 * Possible status values returned are:
 *   NV_OK
 *   NV_ERR_INVALID_PARAM_STRUCT
 *   NV_ERR_NOT_SUPPORTED
 */

#define NV5070_CTRL_CMD_CTRL_SEQ_PROG_SPEED (0x50700305U) /* finn: Evaluated from "(FINN_NV50_DISPLAY_SEQ_INTERFACE_ID << 8) | NV5070_CTRL_SEQ_PROG_SPEED_PARAMS_MESSAGE_ID" */

#define NV5070_CTRL_SEQ_PROG_SPEED_CMD_GET      (0x00000000U)
#define NV5070_CTRL_SEQ_PROG_SPEED_CMD_SET      (0x00000001U)

#define NV5070_CTRL_SEQ_PROG_SPEED_STATE_NORMAL (0x00000000U)
#define NV5070_CTRL_SEQ_PROG_SPEED_STATE_FAST   (0x00000001U)

#define NV5070_CTRL_SEQ_PROG_SPEED_PARAMS_MESSAGE_ID (0x5U)

typedef struct NV5070_CTRL_SEQ_PROG_SPEED_PARAMS {
    NV5070_CTRL_CMD_BASE_PARAMS base;

    NvU32                       displayId;

    NvU32                       cmd;
    NvU32                       state;
} NV5070_CTRL_SEQ_PROG_SPEED_PARAMS;

/* _ctrl5070seq_h_ */

@@ -1,281 +0,0 @@
/*
 * Copyright (c) 1993-2022, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef NVERROR_H
#define NVERROR_H
/******************************************************************************
*
*   File:  nverror.h
*
*   Description:
*       This file contains the error codes set when the error notifier
*       is signaled.
*
******************************************************************************/

#define ROBUST_CHANNEL_GR_EXCEPTION                 (13)
#define ROBUST_CHANNEL_GR_ERROR_SW_NOTIFY           (13)
#define ROBUST_CHANNEL_FAKE_ERROR                   (14)
#define ROBUST_CHANNEL_DISP_MISSED_NOTIFIER         (19)
#define ROBUST_CHANNEL_MPEG_ERROR_SW_METHOD         (20)
#define ROBUST_CHANNEL_ME_ERROR_SW_METHOD           (21)
#define ROBUST_CHANNEL_VP_ERROR_SW_METHOD           (22)
#define ROBUST_CHANNEL_RC_LOGGING_ENABLED           (23)
#define ROBUST_CHANNEL_VP_ERROR                     (27)
#define ROBUST_CHANNEL_VP2_ERROR                    (28)
#define ROBUST_CHANNEL_BSP_ERROR                    (29)
#define ROBUST_CHANNEL_BAD_ADDR_ACCESS              (30)
#define ROBUST_CHANNEL_FIFO_ERROR_MMU_ERR_FLT       (31)
#define ROBUST_CHANNEL_PBDMA_ERROR                  (32)
#define ROBUST_CHANNEL_SEC_ERROR                    (33)
#define ROBUST_CHANNEL_MSVLD_ERROR                  (34)
#define ROBUST_CHANNEL_MSPDEC_ERROR                 (35)
#define ROBUST_CHANNEL_MSPPP_ERROR                  (36)
#define ROBUST_CHANNEL_CE0_ERROR                    (39)
#define ROBUST_CHANNEL_CE1_ERROR                    (40)
#define ROBUST_CHANNEL_CE2_ERROR                    (41)
#define ROBUST_CHANNEL_VIC_ERROR                    (42)
#define ROBUST_CHANNEL_RESETCHANNEL_VERIF_ERROR     (43)
#define ROBUST_CHANNEL_GR_FAULT_DURING_CTXSW        (44)
#define ROBUST_CHANNEL_PREEMPTIVE_REMOVAL           (45)
#define ROBUST_CHANNEL_NVENC0_ERROR                 (47)
#define ROBUST_CHANNEL_GPU_ECC_DBE                  (48)
#define PMU_ERROR                                   (59)
#define ROBUST_CHANNEL_SEC2_ERROR                   (60)
#define PMU_BREAKPOINT                              (61)
#define PMU_HALT_ERROR                              (62)
#define INFOROM_PAGE_RETIREMENT_EVENT               (63)
#define INFOROM_PAGE_RETIREMENT_FAILURE             (64)
#define INFOROM_DRAM_RETIREMENT_EVENT               (63)
#define INFOROM_DRAM_RETIREMENT_FAILURE             (64)
#define ROBUST_CHANNEL_NVENC1_ERROR                 (65)
#define ROBUST_CHANNEL_NVDEC0_ERROR                 (68)
#define ROBUST_CHANNEL_GR_CLASS_ERROR               (69)
#define ROBUST_CHANNEL_CE3_ERROR                    (70)
#define ROBUST_CHANNEL_CE4_ERROR                    (71)
#define ROBUST_CHANNEL_CE5_ERROR                    (72)
#define ROBUST_CHANNEL_NVENC2_ERROR                 (73)
#define NVLINK_ERROR                                (74)
#define ROBUST_CHANNEL_CE6_ERROR                    (75)
#define ROBUST_CHANNEL_CE7_ERROR                    (76)
#define ROBUST_CHANNEL_CE8_ERROR                    (77)
#define VGPU_START_ERROR                            (78)
#define ROBUST_CHANNEL_GPU_HAS_FALLEN_OFF_THE_BUS   (79)
#define PBDMA_PUSHBUFFER_CRC_MISMATCH               (80)
#define ROBUST_CHANNEL_VGA_SUBSYSTEM_ERROR          (81)
#define ROBUST_CHANNEL_NVJPG0_ERROR                 (82)
#define ROBUST_CHANNEL_NVDEC1_ERROR                 (83)
#define ROBUST_CHANNEL_NVDEC2_ERROR                 (84)
#define ROBUST_CHANNEL_CE9_ERROR                    (85)
#define ROBUST_CHANNEL_OFA0_ERROR                   (86)
#define NVTELEMETRY_DRIVER_REPORT                   (87)
#define ROBUST_CHANNEL_NVDEC3_ERROR                 (88)
#define ROBUST_CHANNEL_NVDEC4_ERROR                 (89)
#define LTC_ERROR                                   (90)
#define RESERVED_XID                                (91)
#define EXCESSIVE_SBE_INTERRUPTS                    (92)
#define INFOROM_ERASE_LIMIT_EXCEEDED                (93)
#define ROBUST_CHANNEL_CONTAINED_ERROR              (94)
#define ROBUST_CHANNEL_UNCONTAINED_ERROR            (95)
#define SEC_FAULT_ERROR                             (110)
#define GSP_RPC_TIMEOUT                             (119)
#define GSP_ERROR                                   (120)
#define C2C_ERROR                                   (121)
#define SPI_PMU_RPC_READ_FAIL                       (122)
#define SPI_PMU_RPC_WRITE_FAIL                      (123)
#define SPI_PMU_RPC_ERASE_FAIL                      (124)
#define INFOROM_FS_ERROR                            (125)
#define ROBUST_CHANNEL_LAST_ERROR                   (INFOROM_FS_ERROR)


// Indexed CE reference
#define ROBUST_CHANNEL_CE_ERROR(x)                               \
    ((x < 3) ? (ROBUST_CHANNEL_CE0_ERROR + (x)) :                \
     ((x < 6) ? (ROBUST_CHANNEL_CE3_ERROR + (x - 3)) :           \
      ((x < 9) ? (ROBUST_CHANNEL_CE6_ERROR + (x - 6)) :          \
       ROBUST_CHANNEL_CE9_ERROR)))

#define ROBUST_CHANNEL_IS_CE_ERROR(x)                                      \
    ((x == ROBUST_CHANNEL_CE0_ERROR) || (x == ROBUST_CHANNEL_CE1_ERROR) || \
     (x == ROBUST_CHANNEL_CE2_ERROR) || (x == ROBUST_CHANNEL_CE3_ERROR) || \
     (x == ROBUST_CHANNEL_CE4_ERROR) || (x == ROBUST_CHANNEL_CE5_ERROR) || \
     (x == ROBUST_CHANNEL_CE6_ERROR) || (x == ROBUST_CHANNEL_CE7_ERROR) || \
     (x == ROBUST_CHANNEL_CE8_ERROR) || (x == ROBUST_CHANNEL_CE9_ERROR))

#define ROBUST_CHANNEL_CE_ERROR_IDX(x)                                      \
    (((x >= ROBUST_CHANNEL_CE0_ERROR) && (x <= ROBUST_CHANNEL_CE2_ERROR)) ? \
     (x - ROBUST_CHANNEL_CE0_ERROR) :                                       \
     (((x >= ROBUST_CHANNEL_CE3_ERROR) &&                                   \
       (x <= ROBUST_CHANNEL_CE5_ERROR)) ?                                   \
      (x - ROBUST_CHANNEL_CE3_ERROR + 3) :                                  \
      (((x >= ROBUST_CHANNEL_CE6_ERROR) &&                                  \
        (x <= ROBUST_CHANNEL_CE8_ERROR)) ?                                  \
       (x - ROBUST_CHANNEL_CE6_ERROR + 6) :                                 \
       (x - ROBUST_CHANNEL_CE9_ERROR + 9))))

// Indexed NVDEC reference
#define ROBUST_CHANNEL_NVDEC_ERROR(x)                                  \
    ((x == 0) ?                                                        \
     (ROBUST_CHANNEL_NVDEC0_ERROR) :                                   \
     (((x >= 1) && (x <= 2)) ? (ROBUST_CHANNEL_NVDEC1_ERROR + x - 1) : \
      (ROBUST_CHANNEL_NVDEC3_ERROR + x - 3)))

#define ROBUST_CHANNEL_IS_NVDEC_ERROR(x)    \
    ((x == ROBUST_CHANNEL_NVDEC0_ERROR) ||  \
     (x == ROBUST_CHANNEL_NVDEC1_ERROR) ||  \
     (x == ROBUST_CHANNEL_NVDEC2_ERROR) ||  \
     (x == ROBUST_CHANNEL_NVDEC3_ERROR) ||  \
     (x == ROBUST_CHANNEL_NVDEC4_ERROR))

#define ROBUST_CHANNEL_NVDEC_ERROR_IDX(x)       \
    (((x == ROBUST_CHANNEL_NVDEC0_ERROR)) ?     \
     (x - ROBUST_CHANNEL_NVDEC0_ERROR) :        \
     (((x >= ROBUST_CHANNEL_NVDEC1_ERROR) &&    \
       (x <= ROBUST_CHANNEL_NVDEC2_ERROR)) ?    \
      (x - ROBUST_CHANNEL_NVDEC1_ERROR + 1) :   \
      (x - ROBUST_CHANNEL_NVDEC3_ERROR + 3)))

// Indexed NVENC reference
#define ROBUST_CHANNEL_NVENC_ERROR(x)               \
    ((x == 0) ? (ROBUST_CHANNEL_NVENC0_ERROR) :     \
     ((x == 1) ? (ROBUST_CHANNEL_NVENC1_ERROR) :    \
      (ROBUST_CHANNEL_NVENC2_ERROR)))

#define ROBUST_CHANNEL_IS_NVENC_ERROR(x)    \
    ((x == ROBUST_CHANNEL_NVENC0_ERROR) ||  \
     (x == ROBUST_CHANNEL_NVENC1_ERROR) ||  \
     (x == ROBUST_CHANNEL_NVENC2_ERROR))

#define ROBUST_CHANNEL_NVENC_ERROR_IDX(x)       \
    (((x == ROBUST_CHANNEL_NVENC0_ERROR)) ?     \
     (x - ROBUST_CHANNEL_NVENC0_ERROR) :        \
     (((x == ROBUST_CHANNEL_NVENC1_ERROR)) ?    \
      (x - ROBUST_CHANNEL_NVENC1_ERROR + 1) :   \
      (x - ROBUST_CHANNEL_NVENC2_ERROR + 2)))

// Error Levels
#define ROBUST_CHANNEL_ERROR_RECOVERY_LEVEL_INFO      (0)
#define ROBUST_CHANNEL_ERROR_RECOVERY_LEVEL_NON_FATAL (1)
#define ROBUST_CHANNEL_ERROR_RECOVERY_LEVEL_FATAL     (2)

#define ROBUST_CHANNEL_ERROR_STR_PUBLIC_PUBLISHED     \
    {"Unknown Error",                                 \
     "DMA Engine Error (FIFO Error 1)",               \
     "DMA Engine Error (FIFO Error 2)",               \
     "DMA Engine Error (FIFO Error 3)",               \
     "DMA Engine Error (FIFO Error 4)",               \
     "DMA Engine Error (FIFO Error 5)",               \
     "DMA Engine Error (FIFO Error 6)",               \
     "DMA Engine Error (FIFO Error 7)",               \
     "DMA Engine Error (FIFO Error 8)",               \
     "Graphics Engine Error (GR Error 1)",            \
     "Graphics Engine Error (GR Error 2)",            \
     "Graphics Engine Error (GR Error 3)",            \
     "Graphics Engine Error (GR Error 4)",            \
     "Graphics Engine Error (GR Exception Error)",    \
     "Fake Error",                                    \
     "Display Engine Error (CRTC Error 1)",           \
     "Display Engine Error (CRTC Error 2)",           \
     "Display Engine Error (CRTC Error 3)",           \
     "Bus Interface Error (BIF Error)",               \
     "Client Reported Error",                         \
     "Video Engine Error (MPEG Error)",               \
     "Video Engine Error (ME Error)",                 \
     "Video Engine Error (VP Error 1)",               \
     "Error Reporting Enabled",                       \
     "Graphics Engine Error (GR Error 6)",            \
     "Graphics Engine Error (GR Error 7)",            \
     "DMA Engine Error (FIFO Error 9)",               \
     "Video Engine Error (VP Error 2)",               \
     "Video Engine Error (VP2 Error)",                \
     "Video Engine Error (BSP Error)",                \
     "Access Violation Error (MMU Error 1)",          \
     "Access Violation Error (MMU Error 2)",          \
     "DMA Engine Error (PBDMA Error)",                \
     "Security Engine Error (SEC Error)",             \
     "Video Engine Error (MSVLD Error)",              \
     "Video Engine Error (MSPDEC Error)",             \
     "Video Engine Error (MSPPP Error)",              \
     "Graphics Engine Error (FECS Error 1)",          \
     "Graphics Engine Error (FECS Error 2)",          \
     "DMA Engine Error (CE Error 1)",                 \
     "DMA Engine Error (CE Error 2)",                 \
     "DMA Engine Error (CE Error 3)",                 \
     "Video Engine Error (VIC Error)",                \
     "Verification Error",                            \
     "Access Violation Error (MMU Error 3)",          \
     "Operating System Error (OS Error 1)",           \
     "Operating System Error (OS Error 2)",           \
     "Video Engine Error (MSENC/NVENC0 Error)",       \
     "ECC Error (DBE Error)",                         \
     "Power State Locked",                            \
     "Power State Event (RC Error)",                  \
     "Power State Event (Stress Test Error)",         \
     "Power State Event (Thermal Event 1)",           \
     "Power State Event (Thermal Event 2)",           \
     "Power State Event (Power Event)",               \
     "Power State Event (Thermal Event 3)",           \
     "Display Engine Error (EVO Error)",              \
     "FB Interface Error (FBPA Error 1)",             \
     "FB Interface Error (FBPA Error 2)",             \
     "PMU error",                                     \
     "SEC2 error",                                    \
     "PMU Breakpoint (non-fatal)",                    \
     "PMU Halt Error",                                \
     "INFOROM Page Retirement Event",                 \
     "INFOROM Page Retirement Failure",               \
     "Video Engine Error (NVENC1 Error)",             \
     "Graphics Engine Error (FECS Error 3)",          \
     "Graphics Engine Error (FECS Error 4)",          \
     "Video Engine Error (NVDEC0 Error)",             \
     "Graphics Engine Error (GR Class Error)",        \
     "DMA Engine Error (CE Error 4)",                 \
     "DMA Engine Error (CE Error 5)",                 \
     "DMA Engine Error (CE Error 6)",                 \
     "Video Engine Error (NVENC2 Error)",             \
     "NVLink Error",                                  \
     "DMA Engine Error (CE Error 6)",                 \
     "DMA Engine Error (CE Error 7)",                 \
     "DMA Engine Error (CE Error 8)",                 \
     "vGPU device cannot be started",                 \
     "GPU has fallen off the bus",                    \
     "DMA Engine Error (Pushbuffer CRC mismatch)",    \
     "VGA Subsystem Error",                           \
     "Video JPEG Engine Error (NVJPG Error)",         \
     "Video Engine Error (NVDEC1 Error)",             \
     "Video Engine Error (NVDEC2 Error)",             \
     "DMA Engine Error (CE Error 9)",                 \
     "Video OFA Engine Error (OFA0 Error)",           \
"NvTelemetry Driver Reoprt", \
     "Video Engine Error (NVDEC3 Error)",             \
     "Video Engine Error (NVDEC4 Error)",             \
     "FB Interface Error (FBPA Error 3)",             \
     "Reserved Xid",                                  \
     "Excessive SBE interrupts",                      \
     "INFOROM Erase Limit Exceeded",                  \
     "Contained error",                               \
     "Uncontained error"

#define ROBUST_CHANNEL_ERROR_STR_PUBLIC \
    ROBUST_CHANNEL_ERROR_STR_PUBLIC_PUBLISHED}

#endif  // NVERROR_H
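The indexed-engine macros above map a zero-based engine instance to its error code and back: ROBUST_CHANNEL_CE_ERROR_IDX() inverts ROBUST_CHANNEL_CE_ERROR(), with per-group offsets mirroring the NVDEC and NVENC variants. A minimal sketch exercising that round trip, assuming nverror.h is on the include path:

    #include <assert.h>
    #include <stdio.h>
    #include "nverror.h"

    int main(void)
    {
        /* CE0..CE9 map onto three non-contiguous code ranges plus CE9. */
        for (int i = 0; i < 10; i++) {
            int xid = ROBUST_CHANNEL_CE_ERROR(i);
            assert(ROBUST_CHANNEL_IS_CE_ERROR(xid));
            assert(ROBUST_CHANNEL_CE_ERROR_IDX(xid) == i);
            printf("CE%d -> error code %d\n", i, xid);
        }
        return 0;
    }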
@@ -1,58 +0,0 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2014-2015 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef __NVKMS_MODESET_H__
#define __NVKMS_MODESET_H__

#include "nvkms-types.h"

#ifdef __cplusplus
extern "C" {
#endif

NvBool nvSetDispModeEvo(NVDevEvoPtr pDevEvo,
                        const struct NvKmsPerOpenDev *pOpenDev,
                        const struct NvKmsSetModeRequest *pRequest,
                        struct NvKmsSetModeReply *pReply,
                        NvBool bypassComposition,
                        NvBool doRasterLock);

typedef NvBool (*NVShutDownHeadsTestFunc)(
    const NVDispEvoRec *pDispEvo,
    const NvU32 head);

void nvShutDownHeads(NVDevEvoPtr pDevEvo, NVShutDownHeadsTestFunc pTestFunc);

NVVBlankCallbackPtr nvRegisterVBlankCallback(NVDispEvoPtr pDispEvo,
                                             NvU32 head,
                                             NVVBlankCallbackProc pCallback,
                                             void *pUserData);
void nvUnregisterVBlankCallback(NVDispEvoPtr pDispEvo,
                                NvU32 head,
                                NVVBlankCallbackPtr pCallback);

#ifdef __cplusplus
};
#endif

#endif /* __NVKMS_MODESET_H__ */
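The NVShutDownHeadsTestFunc contract declared above is simple: nvShutDownHeads() invokes the callback per (pDispEvo, head) pair, and a TRUE return selects that head for shutdown. A minimal sketch of a test function that selects every head (the function name is hypothetical, and this only compiles inside the nvkms tree where NVDispEvoRec and NvBool are defined):

    static NvBool ShutDownAllHeadsTest(const NVDispEvoRec *pDispEvo,
                                       const NvU32 head)
    {
        /* Select every head on every disp for shutdown. */
        return TRUE;
    }

    /* ...later, with a valid pDevEvo: */
    /* nvShutDownHeads(pDevEvo, ShutDownAllHeadsTest); */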
@@ -1,150 +0,0 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2016 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "nvidia-modeset-os-interface.h"

#include "nvkms-rmapi.h"

#include "nvkms-kapi.h"
#include "nvkms-kapi-private.h"
#include "nvkms-kapi-internal.h"

#include "class/cl0005.h"

struct NvKmsKapiChannelEvent {
    struct NvKmsKapiDevice *device;

    NvKmsChannelEventProc *proc;
    void *data;

    struct NvKmsKapiPrivAllocateChannelEventParams nvKmsParams;

    NvHandle hCallback;
    NVOS10_EVENT_KERNEL_CALLBACK_EX rmCallback;
};

static void ChannelEventHandler(void *arg1, void *arg2, NvHandle hEvent,
                                NvU32 data, NvU32 status)
{
    struct NvKmsKapiChannelEvent *cb = arg1;
    cb->proc(cb->data, 0);
}

struct NvKmsKapiChannelEvent* nvKmsKapiAllocateChannelEvent
(
    struct NvKmsKapiDevice *device,
    NvKmsChannelEventProc *proc,
    void *data,
    NvU64 nvKmsParamsUser,
    NvU64 nvKmsParamsSize
)
{
    int status;
    NvU32 ret;

    struct NvKmsKapiChannelEvent *cb = NULL;
    NV0005_ALLOC_PARAMETERS eventParams = { };

    if (device == NULL || proc == NULL) {
        goto fail;
    }

    cb = nvKmsKapiCalloc(1, sizeof(*cb));
    if (cb == NULL) {
        goto fail;
    }

    /* Verify the driver-private params size and copy it in from userspace */

    if (nvKmsParamsSize != sizeof(cb->nvKmsParams)) {
        nvKmsKapiLogDebug(
            "NVKMS private memory import parameter size mismatch - "
            "expected: 0x%llx, caller specified: 0x%llx",
            (NvU64)sizeof(cb->nvKmsParams), nvKmsParamsSize);
        goto fail;
    }

    status = nvkms_copyin(&cb->nvKmsParams,
                          nvKmsParamsUser, sizeof(cb->nvKmsParams));
    if (status != 0) {
        nvKmsKapiLogDebug(
            "NVKMS private memory import parameters could not be read from "
            "userspace");
        goto fail;
    }

    cb->device = device;

    cb->proc = proc;
    cb->data = data;

    cb->rmCallback.func = ChannelEventHandler;
    cb->rmCallback.arg = cb;

    cb->hCallback = nvGenerateUnixRmHandle(&device->handleAllocator);
    if (cb->hCallback == 0x0) {
        nvKmsKapiLogDeviceDebug(device,
                                "Failed to allocate event callback handle");
        goto fail;
    }

    eventParams.hParentClient = cb->nvKmsParams.hClient;
    eventParams.hClass = NV01_EVENT_KERNEL_CALLBACK_EX;
    eventParams.notifyIndex = 0;
    eventParams.data = NV_PTR_TO_NvP64(&cb->rmCallback);

    ret = nvRmApiAlloc(device->hRmClient,
                       cb->nvKmsParams.hChannel,
                       cb->hCallback,
                       NV01_EVENT_KERNEL_CALLBACK_EX,
                       &eventParams);
    if (ret != NVOS_STATUS_SUCCESS) {
        nvKmsKapiLogDeviceDebug(device, "Failed to allocate event callback");
        nvFreeUnixRmHandle(&device->handleAllocator, cb->hCallback);
        goto fail;
    }

    return cb;
fail:
    nvKmsKapiFree(cb);
    return NULL;
}

void nvKmsKapiFreeChannelEvent
(
    struct NvKmsKapiDevice *device,
    struct NvKmsKapiChannelEvent *cb
)
{
    if (device == NULL || cb == NULL) {
        return;
    }

    nvRmApiFree(device->hRmClient,
                device->hRmClient,
                cb->hCallback);

    nvFreeUnixRmHandle(&device->handleAllocator, cb->hCallback);

    nvKmsKapiFree(cb);
}
@@ -1,391 +0,0 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2014 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "nvkms-lut.h"
#include "nvkms-rm.h"
#include "nvkms-rmapi.h"
#include "nvkms-dma.h"
#include "nvkms-utils.h"
#include "nvos.h"

#include <class/cl0040.h> /* NV01_MEMORY_LOCAL_USER */

static void FreeLutSurfaceEvoInVidmem(NVLutSurfaceEvoPtr pSurfEvo)
{
    NVDevEvoPtr pDevEvo;

    if (pSurfEvo == NULL) {
        return;
    }

    pDevEvo = pSurfEvo->pDevEvo;

    nvRmEvoUnMapVideoMemory(pDevEvo, pSurfEvo->handle,
                            pSurfEvo->subDeviceAddress);

    /* Free display context dmas for the surface, if any */
    nvRmEvoFreeDispContextDMA(pDevEvo, &pSurfEvo->dispCtxDma);

    /* Free the surface */
    if (pSurfEvo->handle) {
        NvU32 result;

        result = nvRmApiFree(nvEvoGlobal.clientHandle,
                             pDevEvo->deviceHandle, pSurfEvo->handle);
        if (result != NVOS_STATUS_SUCCESS) {
            nvAssert(!"Freeing LUT surface failed");
        }

        nvFreeUnixRmHandle(&pDevEvo->handleAllocator,
                           pSurfEvo->handle);
        pSurfEvo->handle = 0;
    }

    nvFree(pSurfEvo);
}

static NVLutSurfaceEvoPtr AllocLutSurfaceEvoInVidmem(NVDevEvoPtr pDevEvo)
{
    NV_MEMORY_ALLOCATION_PARAMS memAllocParams = { };
    NvU32 ret = NVOS_STATUS_ERROR_GENERIC;
    NvU32 attr = 0, attr2 = 0;
    NvU32 allocFlags = NVOS32_ALLOC_FLAGS_FORCE_MEM_GROWS_DOWN |
                       NVOS32_ALLOC_FLAGS_ALIGNMENT_FORCE;
    NvU64 size = 0, alignment = 4096;

    NVLutSurfaceEvoPtr pSurfEvo;

    pSurfEvo = nvCalloc(1, sizeof(*pSurfEvo));
    if (pSurfEvo == NULL) {
        return NULL;
    }

    pSurfEvo->pDevEvo = pDevEvo;

    size = (sizeof(NVEvoLutDataRec) + 63) & ~63;

    pSurfEvo->size = size;

    pSurfEvo->handle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator);

    if (pSurfEvo->handle == 0) {
        goto fail;
    }

    attr = FLD_SET_DRF(OS32, _ATTR, _LOCATION, _VIDMEM, attr);
    attr2 = FLD_SET_DRF(OS32, _ATTR2, _GPU_CACHEABLE, _DEFAULT, attr2);

    alignment = NV_MAX(alignment, NV_EVO_SURFACE_ALIGNMENT);
    if (alignment != 0) {
        allocFlags |= NVOS32_ALLOC_FLAGS_ALIGNMENT_FORCE;
    }

    memAllocParams.owner = NVKMS_RM_HEAP_ID;
    memAllocParams.type = NVOS32_TYPE_IMAGE;
    memAllocParams.size = size;
    memAllocParams.attr = attr;
    memAllocParams.attr2 = attr2;
    memAllocParams.flags = allocFlags;
    memAllocParams.alignment = alignment;

    ret = nvRmApiAlloc(nvEvoGlobal.clientHandle,
                       pDevEvo->deviceHandle,
                       pSurfEvo->handle,
                       NV01_MEMORY_LOCAL_USER,
                       &memAllocParams);

    /* If we failed the allocation above, abort */
    if (ret != NVOS_STATUS_SUCCESS) {
        nvFreeUnixRmHandle(&pDevEvo->handleAllocator, pSurfEvo->handle);
        pSurfEvo->handle = 0;

        goto fail;
    }

    /* Allocate a display context dma */
    pSurfEvo->dispCtxDma =
        nvRmEvoAllocateAndBindDispContextDMA(pDevEvo,
                                             pSurfEvo->handle,
                                             NvKmsSurfaceMemoryLayoutPitch,
                                             pSurfEvo->size - 1);

    if (!pSurfEvo->dispCtxDma) {
        goto fail;
    }

    /* Map the surface for the CPU */
    if (!nvRmEvoMapVideoMemory(pSurfEvo->pDevEvo,
                               pSurfEvo->handle, pSurfEvo->size,
                               pSurfEvo->subDeviceAddress,
                               SUBDEVICE_MASK_ALL)) {
        goto fail;
    }

    return pSurfEvo;

fail:
    /* An error occurred -- free the surface */
    FreeLutSurfaceEvoInVidmem(pSurfEvo);

    return NULL;
}

static void FreeLutSurfaceEvoInSysmem(NVLutSurfaceEvoPtr pSurfEvo)
{
    NVDevEvoPtr pDevEvo;

    if (pSurfEvo == NULL) {
        return;
    }

    pDevEvo = pSurfEvo->pDevEvo;

    /* Free display context dmas for the surface, if any */
    nvRmEvoFreeDispContextDMA(pDevEvo, &pSurfEvo->dispCtxDma);

    /* Free the surface */
    if (pSurfEvo->handle) {
        NvU32 result;

        if (pSurfEvo->subDeviceAddress[0] != NULL) {
            /*
             * SOC display devices should only have one subdevice
             * (and therefore it is safe to unmap only subDeviceAddress[0])
             * for reasons described in AllocLutSurfaceEvoInSysmem
             */
            nvAssert(pDevEvo->numSubDevices == 1);

            result = nvRmApiUnmapMemory(nvEvoGlobal.clientHandle,
                                        pDevEvo->deviceHandle,
                                        pSurfEvo->handle,
                                        pSurfEvo->subDeviceAddress[0],
                                        0);
            if (result != NVOS_STATUS_SUCCESS) {
                nvAssert(!"Unmapping LUT surface failed");
            }
            pSurfEvo->subDeviceAddress[0] = NULL;
        }

        result = nvRmApiFree(nvEvoGlobal.clientHandle,
                             pDevEvo->deviceHandle, pSurfEvo->handle);
        if (result != NVOS_STATUS_SUCCESS) {
            nvAssert(!"Freeing LUT surface failed");
        }

        nvFreeUnixRmHandle(&pDevEvo->handleAllocator, pSurfEvo->handle);
    }

    nvFree(pSurfEvo);
}

static NVLutSurfaceEvoPtr AllocLutSurfaceEvoInSysmem(NVDevEvoPtr pDevEvo)
{
    NvU32 memoryHandle = 0;
    void *pBase = NULL;
    NvU64 size = 0;
    NVLutSurfaceEvoPtr pSurfEvo;

    pSurfEvo = nvCalloc(1, sizeof(*pSurfEvo));
    if (pSurfEvo == NULL) {
        return NULL;
    }

    pSurfEvo->pDevEvo = pDevEvo;

    size = (sizeof(NVEvoLutDataRec) + 63) & ~63;

    pSurfEvo->size = size;

    memoryHandle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator);
    if (memoryHandle == 0) {
        goto fail;
    }

    /* Allocate the LUT memory from sysmem */
    if (!nvRmAllocSysmem(pDevEvo, memoryHandle, NULL, &pBase, size,
                         NVKMS_MEMORY_ISO)) {
        nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
                    "Unable to allocate LUT memory from sysmem");
        nvFreeUnixRmHandle(&pDevEvo->handleAllocator, memoryHandle);

        goto fail;
    }

    pSurfEvo->handle = memoryHandle;

    /* Allocate and bind a display context dma */
    pSurfEvo->dispCtxDma =
        nvRmEvoAllocateAndBindDispContextDMA(pDevEvo,
                                             pSurfEvo->handle,
                                             NvKmsSurfaceMemoryLayoutPitch,
                                             pSurfEvo->size - 1);
    if (!pSurfEvo->dispCtxDma) {
        goto fail;
    }

    /*
     * AllocLutSurfaceEvoInSysmem() will only be called if
     * pDevEvo->requiresAllAllocationsInSysmem is TRUE. NVKMS will only set this
     * cap bit for SOC display devices, and these devices should only have one
     * subdevice.
     */
    nvAssert(pDevEvo->numSubDevices == 1);
    pSurfEvo->subDeviceAddress[0] = pBase;

    return pSurfEvo;

fail:
    /* An error occurred -- free the surface */
    FreeLutSurfaceEvoInSysmem(pSurfEvo);

    return NULL;
}

static void FreeLutSurfaceEvo(NVLutSurfaceEvoPtr pSurfEvo)
{
    NVDevEvoPtr pDevEvo;

    if (pSurfEvo == NULL) {
        return;
    }

    pDevEvo = pSurfEvo->pDevEvo;

    if (pDevEvo->requiresAllAllocationsInSysmem) {
        FreeLutSurfaceEvoInSysmem(pSurfEvo);
    } else {
        FreeLutSurfaceEvoInVidmem(pSurfEvo);
    }
}

static NVLutSurfaceEvoPtr AllocLutSurfaceEvo(NVDevEvoPtr pDevEvo)
{
    if (pDevEvo->requiresAllAllocationsInSysmem) {
        return AllocLutSurfaceEvoInSysmem(pDevEvo);
    } else {
        return AllocLutSurfaceEvoInVidmem(pDevEvo);
    }
}

NvBool nvAllocLutSurfacesEvo(NVDevEvoPtr pDevEvo)
{
    NVDispEvoPtr pDispEvo;
    NvU32 head, dispIndex, i;

    for (head = 0; head < pDevEvo->numHeads; head++) {
        for (i = 0; i < ARRAY_LEN(pDevEvo->lut.head[head].LUT); i++) {
            pDevEvo->lut.head[head].LUT[i] = AllocLutSurfaceEvo(pDevEvo);

            if (pDevEvo->lut.head[head].LUT[i] == NULL) {
                nvFreeLutSurfacesEvo(pDevEvo);
                return FALSE;
            }
        }

        FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
            // No palette has been loaded yet, so disable the LUT.
            pDevEvo->lut.head[head].disp[dispIndex].waitForPreviousUpdate = FALSE;
            pDevEvo->lut.head[head].disp[dispIndex].curBaseLutEnabled = FALSE;
            pDevEvo->lut.head[head].disp[dispIndex].curOutputLutEnabled = FALSE;
        }
    }

    if (pDevEvo->hal->caps.needDefaultLutSurface) {
        pDevEvo->lut.defaultLut = AllocLutSurfaceEvo(pDevEvo);
        if (pDevEvo->lut.defaultLut == NULL) {
            nvFreeLutSurfacesEvo(pDevEvo);
            return FALSE;
        }

        pDevEvo->hal->InitDefaultLut(pDevEvo);
    }

    return TRUE;
}

void nvFreeLutSurfacesEvo(NVDevEvoPtr pDevEvo)
{
    NvU32 head, i, dispIndex;
    NVDispEvoPtr pDispEvo;

    /* Cancel any queued LUT update timers */
    FOR_ALL_EVO_DISPLAYS(pDispEvo, dispIndex, pDevEvo) {
        for (head = 0; head < pDevEvo->numHeads; head++) {
            nvCancelLutUpdateEvo(pDispEvo, head);
        }
    }

    /* wait for any outstanding LUT updates before freeing the surface */
    if (pDevEvo->core) {
        nvRMSyncEvoChannel(pDevEvo, pDevEvo->core, __LINE__);
    }

    if (pDevEvo->lut.defaultLut != NULL) {
        FreeLutSurfaceEvo(pDevEvo->lut.defaultLut);
        pDevEvo->lut.defaultLut = NULL;
    }

    for (head = 0; head < pDevEvo->numHeads; head++) {
        for (i = 0; i < ARRAY_LEN(pDevEvo->lut.head[head].LUT); i++) {
            if (pDevEvo->lut.head[head].LUT[i] != NULL) {
                FreeLutSurfaceEvo(pDevEvo->lut.head[head].LUT[i]);
                pDevEvo->lut.head[head].LUT[i] = NULL;
            }
        }
    }
}

void nvUploadDataToLutSurfaceEvo(NVLutSurfaceEvoPtr pSurfEvo,
                                 const NVEvoLutDataRec *pLUTBuffer,
                                 NVDispEvoPtr pDispEvo)
{
    const NvU32 *data = (const NvU32 *)pLUTBuffer;
    size_t size = sizeof(*pLUTBuffer);
    const int sd = pDispEvo->displayOwner;
    NvU32 *dst;
    const NvU32 *src;
    int dword;

    if (pSurfEvo == NULL) {
        nvAssert(pSurfEvo);
        return;
    }

    nvAssert(pSurfEvo->subDeviceAddress[sd]);

    /* The size to copy should not be larger than the surface. */
    nvAssert(size <= pSurfEvo->size);

    /* The source, destination, and size should be 4-byte aligned. */
    nvAssert((((NvUPtr)data) & 0x3) == 0);
    nvAssert((((NvUPtr)pSurfEvo->subDeviceAddress[sd]) & 0x3) == 0);
    nvAssert((size % 4) == 0);

    src = data;
    dst = (NvU32 *)pSurfEvo->subDeviceAddress[sd];

    for (dword = 0; dword < (size/4); dword++) {
        *(dst++) = *(src++);
    }
}
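Both allocation paths above round the LUT data size up to a 64-byte multiple with the classic power-of-two idiom (n + 63) & ~63. A standalone illustration of that rounding, independent of the driver types:

    #include <stdio.h>

    /* Round n up to the next multiple of 64 (64 is a power of two). */
    static unsigned long round_up_64(unsigned long n)
    {
        return (n + 63UL) & ~63UL;
    }

    int main(void)
    {
        printf("%lu -> %lu\n", 1UL,  round_up_64(1UL));   /* 1   -> 64  */
        printf("%lu -> %lu\n", 64UL, round_up_64(64UL));  /* 64  -> 64  */
        printf("%lu -> %lu\n", 65UL, round_up_64(65UL));  /* 65  -> 128 */
        return 0;
    }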
File diff suppressed because it is too large
@@ -1,177 +0,0 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2015-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "nvkms-dma.h"
#include "nvkms-evo.h"
#include "nvkms-rm.h"
#include "nvkms-rmapi.h"
#include "nvkms-vrr.h"
#include "dp/nvdp-connector-event-sink.h"
#include "nvkms-hdmi.h"
#include "nvkms-dpy.h"

#include <ctrl/ctrl0000/ctrl0000unix.h>

/*!
 * Allocate the VRR semaphore surface.
 *
 * Only one array of VRR semaphores is needed per "head group", which for our
 * purposes means a pDevEvo.  This array is allocated when the device is
 * initialized and kept around for the lifetime of the pDevEvo.
 */
void nvAllocVrrEvo(NVDevEvoPtr pDevEvo)
{
    NvU32 handle;
    NvU64 size = NVKMS_VRR_SEMAPHORE_SURFACE_SIZE;

    /* On GPUs that support the HEAD_SET_DISPLAY_RATE method (nvdisplay), we
     * don't need a VRR semaphore surface. */
    if (pDevEvo->hal->caps.supportsDisplayRate) {
        return;
    }

    handle = nvGenerateUnixRmHandle(&pDevEvo->handleAllocator);

    if (nvRmAllocSysmem(pDevEvo, handle, NULL, &pDevEvo->vrr.pSemaphores,
                        size, NVKMS_MEMORY_NISO)) {
        pDevEvo->vrr.semaphoreHandle = handle;
    } else {
        nvEvoLogDev(pDevEvo, EVO_LOG_ERROR,
                    "Failed to allocate G-SYNC semaphore memory");
        nvFreeUnixRmHandle(&pDevEvo->handleAllocator, handle);
    }
}

void nvFreeVrrEvo(NVDevEvoPtr pDevEvo)
{
    if (pDevEvo->vrr.semaphoreHandle != 0) {
        if (pDevEvo->vrr.pSemaphores != NULL) {
            nvRmApiUnmapMemory(nvEvoGlobal.clientHandle,
                               pDevEvo->deviceHandle,
                               pDevEvo->vrr.semaphoreHandle,
                               pDevEvo->vrr.pSemaphores,
                               0);
            pDevEvo->vrr.pSemaphores = NULL;
        }
        nvRmApiFree(nvEvoGlobal.clientHandle, pDevEvo->deviceHandle,
                    pDevEvo->vrr.semaphoreHandle);
        nvFreeUnixRmHandle(&pDevEvo->handleAllocator,
                           pDevEvo->vrr.semaphoreHandle);
        pDevEvo->vrr.semaphoreHandle = 0;
    }
}

NvBool nvExportVrrSemaphoreSurface(const NVDevEvoRec *pDevEvo, int fd)
{
    // Export the memory as an FD.
    NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TO_FD_PARAMS exportParams = { };
    const NvU32 hMemory = pDevEvo->vrr.semaphoreHandle;
    NvU32 status;

    if (hMemory == 0) {
        return FALSE;
    }

    exportParams.fd = fd;
    exportParams.object.type = NV0000_CTRL_OS_UNIX_EXPORT_OBJECT_TYPE_RM;
    exportParams.object.data.rmObject.hDevice = pDevEvo->deviceHandle;
    exportParams.object.data.rmObject.hObject = hMemory;

    status = nvRmApiControl(nvEvoGlobal.clientHandle,
                            nvEvoGlobal.clientHandle,
                            NV0000_CTRL_CMD_OS_UNIX_EXPORT_OBJECT_TO_FD,
                            &exportParams, sizeof(exportParams));

    return status == NVOS_STATUS_SUCCESS;
}

NvBool nvDispSupportsVrr(
    const NVDispEvoRec *pDispEvo)
{
    return FALSE;
}

void nvDisableVrr(NVDevEvoPtr pDevEvo)
{
    return;
}

void nvGetDpyMinRefreshRateValidValues(
    const NVHwModeTimingsEvo *pTimings,
    const enum NvKmsDpyVRRType vrrType,
    const NvU32 edidTimeoutMicroseconds,
    NvU32 *minMinRefreshRate,
    NvU32 *maxMinRefreshRate)
{
    return;
}

void nvEnableVrr(
    NVDevEvoPtr pDevEvo,
    const struct NvKmsSetModeRequest *pRequest)
{
    return;
}

void nvSetVrrActive(
    NVDevEvoPtr pDevEvo,
    NvBool active)
{
    return;
}

void nvApplyVrrBaseFlipOverrides(
    const NVDispEvoRec *pDispEvo,
    NvU32 head,
    const NVFlipChannelEvoHwState *pOld,
    NVFlipChannelEvoHwState *pNew)
{
    return;
}

void nvCancelVrrFrameReleaseTimers(
    NVDevEvoPtr pDevEvo)
{
    return;
}

void nvSetNextVrrFlipTypeAndIndex(
    NVDevEvoPtr pDevEvo,
    struct NvKmsFlipReply *reply)
{
    return;
}

void nvTriggerVrrUnstallMoveCursor(
    NVDispEvoPtr pDispEvo)
{
    return;
}

void nvTriggerVrrUnstallSetCursorImage(
    NVDispEvoPtr pDispEvo,
    NvBool ctxDmaChanged)
{
    return;
}
@@ -1,241 +0,0 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 1999-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */


/*
 * Os interface definitions needed by os-interface.c
 */

#ifndef OS_INTERFACE_H
#define OS_INTERFACE_H

/******************* Operating System Interface Routines *******************\
*                                                                           *
*         Operating system wrapper functions used to abstract the OS.      *
*                                                                           *
\***************************************************************************/

#include <nvtypes.h>
#include <nvstatus.h>
#include "nv_stdarg.h"
#include <nv-kernel-interface-api.h>
#include <os/nv_memory_type.h>
#include <nv-caps.h>

typedef struct
{
    NvU32  os_major_version;
    NvU32  os_minor_version;
    NvU32  os_build_number;
    const char *os_build_version_str;
    const char *os_build_date_plus_str;
} os_version_info;

/* Each OS defines its own version of this opaque type */
struct os_work_queue;

/* Each OS defines its own version of this opaque type */
typedef struct os_wait_queue os_wait_queue;

/*
 * ---------------------------------------------------------------------------
 *
 * Function prototypes for OS interface.
 *
 * ---------------------------------------------------------------------------
 */

NvU64     NV_API_CALL os_get_num_phys_pages(void);
NV_STATUS NV_API_CALL os_alloc_mem(void **, NvU64);
void      NV_API_CALL os_free_mem(void *);
NV_STATUS NV_API_CALL os_get_current_time(NvU32 *, NvU32 *);
NvU64     NV_API_CALL os_get_current_tick(void);
NvU64     NV_API_CALL os_get_current_tick_hr(void);
NvU64     NV_API_CALL os_get_tick_resolution(void);
NV_STATUS NV_API_CALL os_delay(NvU32);
NV_STATUS NV_API_CALL os_delay_us(NvU32);
NvU64     NV_API_CALL os_get_cpu_frequency(void);
NvU32     NV_API_CALL os_get_current_process(void);
void      NV_API_CALL os_get_current_process_name(char *, NvU32);
NV_STATUS NV_API_CALL os_get_current_thread(NvU64 *);
char*     NV_API_CALL os_string_copy(char *, const char *);
NvU32     NV_API_CALL os_string_length(const char *);
NvU32     NV_API_CALL os_strtoul(const char *, char **, NvU32);
NvS32     NV_API_CALL os_string_compare(const char *, const char *);
NvS32     NV_API_CALL os_snprintf(char *, NvU32, const char *, ...);
NvS32     NV_API_CALL os_vsnprintf(char *, NvU32, const char *, va_list);
void      NV_API_CALL os_log_error(const char *, va_list);
void*     NV_API_CALL os_mem_copy(void *, const void *, NvU32);
NV_STATUS NV_API_CALL os_memcpy_from_user(void *, const void *, NvU32);
NV_STATUS NV_API_CALL os_memcpy_to_user(void *, const void *, NvU32);
void*     NV_API_CALL os_mem_set(void *, NvU8, NvU32);
NvS32     NV_API_CALL os_mem_cmp(const NvU8 *, const NvU8 *, NvU32);
void*     NV_API_CALL os_pci_init_handle(NvU32, NvU8, NvU8, NvU8, NvU16 *, NvU16 *);
NV_STATUS NV_API_CALL os_pci_read_byte(void *, NvU32, NvU8 *);
NV_STATUS NV_API_CALL os_pci_read_word(void *, NvU32, NvU16 *);
NV_STATUS NV_API_CALL os_pci_read_dword(void *, NvU32, NvU32 *);
NV_STATUS NV_API_CALL os_pci_write_byte(void *, NvU32, NvU8);
NV_STATUS NV_API_CALL os_pci_write_word(void *, NvU32, NvU16);
NV_STATUS NV_API_CALL os_pci_write_dword(void *, NvU32, NvU32);
NvBool    NV_API_CALL os_pci_remove_supported(void);
void      NV_API_CALL os_pci_remove(void *);
void*     NV_API_CALL os_map_kernel_space(NvU64, NvU64, NvU32);
void      NV_API_CALL os_unmap_kernel_space(void *, NvU64);
void*     NV_API_CALL os_map_user_space(NvU64, NvU64, NvU32, NvU32, void **);
void      NV_API_CALL os_unmap_user_space(void *, NvU64, void *);
NV_STATUS NV_API_CALL os_flush_cpu_cache(void);
NV_STATUS NV_API_CALL os_flush_cpu_cache_all(void);
NV_STATUS NV_API_CALL os_flush_user_cache(void);
void      NV_API_CALL os_flush_cpu_write_combine_buffer(void);
NvU8      NV_API_CALL os_io_read_byte(NvU32);
NvU16     NV_API_CALL os_io_read_word(NvU32);
NvU32     NV_API_CALL os_io_read_dword(NvU32);
void      NV_API_CALL os_io_write_byte(NvU32, NvU8);
void      NV_API_CALL os_io_write_word(NvU32, NvU16);
void      NV_API_CALL os_io_write_dword(NvU32, NvU32);
NvBool    NV_API_CALL os_is_administrator(void);
NvBool    NV_API_CALL os_allow_priority_override(void);
void      NV_API_CALL os_dbg_init(void);
void      NV_API_CALL os_dbg_breakpoint(void);
void      NV_API_CALL os_dbg_set_level(NvU32);
NvU32     NV_API_CALL os_get_cpu_count(void);
NvU32     NV_API_CALL os_get_cpu_number(void);
void      NV_API_CALL os_disable_console_access(void);
void      NV_API_CALL os_enable_console_access(void);
NV_STATUS NV_API_CALL os_registry_init(void);
NV_STATUS NV_API_CALL os_schedule(void);
NV_STATUS NV_API_CALL os_alloc_spinlock(void **);
void      NV_API_CALL os_free_spinlock(void *);
NvU64     NV_API_CALL os_acquire_spinlock(void *);
void      NV_API_CALL os_release_spinlock(void *, NvU64);
NV_STATUS NV_API_CALL os_queue_work_item(struct os_work_queue *, void *);
NV_STATUS NV_API_CALL os_flush_work_queue(struct os_work_queue *);
NV_STATUS NV_API_CALL os_alloc_mutex(void **);
void      NV_API_CALL os_free_mutex(void *);
NV_STATUS NV_API_CALL os_acquire_mutex(void *);
NV_STATUS NV_API_CALL os_cond_acquire_mutex(void *);
void      NV_API_CALL os_release_mutex(void *);
void*     NV_API_CALL os_alloc_semaphore(NvU32);
void      NV_API_CALL os_free_semaphore(void *);
NV_STATUS NV_API_CALL os_acquire_semaphore(void *);
NV_STATUS NV_API_CALL os_cond_acquire_semaphore(void *);
NV_STATUS NV_API_CALL os_release_semaphore(void *);
NvBool    NV_API_CALL os_semaphore_may_sleep(void);
NV_STATUS NV_API_CALL os_get_version_info(os_version_info*);
NvBool    NV_API_CALL os_is_isr(void);
NvBool    NV_API_CALL os_pat_supported(void);
void      NV_API_CALL os_dump_stack(void);
NvBool    NV_API_CALL os_is_efi_enabled(void);
NvBool    NV_API_CALL os_is_xen_dom0(void);
NvBool    NV_API_CALL os_is_vgx_hyper(void);
NV_STATUS NV_API_CALL os_inject_vgx_msi(NvU16, NvU64, NvU32);
NvBool    NV_API_CALL os_is_grid_supported(void);
NvU32     NV_API_CALL os_get_grid_csp_support(void);
void      NV_API_CALL os_get_screen_info(NvU64 *, NvU16 *, NvU16 *, NvU16 *, NvU16 *, NvU64, NvU64);
void      NV_API_CALL os_bug_check(NvU32, const char *);
NV_STATUS NV_API_CALL os_lock_user_pages(void *, NvU64, void **, NvU32);
NV_STATUS NV_API_CALL os_lookup_user_io_memory(void *, NvU64, NvU64 **, void**);
NV_STATUS NV_API_CALL os_unlock_user_pages(NvU64, void *);
NV_STATUS NV_API_CALL os_match_mmap_offset(void *, NvU64, NvU64 *);
NV_STATUS NV_API_CALL os_get_euid(NvU32 *);
NV_STATUS NV_API_CALL os_get_smbios_header(NvU64 *pSmbsAddr);
NV_STATUS NV_API_CALL os_get_acpi_rsdp_from_uefi(NvU32 *);
void      NV_API_CALL os_add_record_for_crashLog(void *, NvU32);
void      NV_API_CALL os_delete_record_for_crashLog(void *);
NV_STATUS NV_API_CALL os_call_vgpu_vfio(void *, NvU32);
NV_STATUS NV_API_CALL os_numa_memblock_size(NvU64 *);
NV_STATUS NV_API_CALL os_alloc_pages_node(NvS32, NvU32, NvU32, NvU64 *);
NV_STATUS NV_API_CALL os_get_page(NvU64 address);
NV_STATUS NV_API_CALL os_put_page(NvU64 address);
NvU32     NV_API_CALL os_get_page_refcount(NvU64 address);
NvU32     NV_API_CALL os_count_tail_pages(NvU64 address);
void      NV_API_CALL os_free_pages_phys(NvU64, NvU32);
NV_STATUS NV_API_CALL os_call_nv_vmbus(NvU32, void *);
NV_STATUS NV_API_CALL os_open_temporary_file(void **);
void      NV_API_CALL os_close_file(void *);
NV_STATUS NV_API_CALL os_write_file(void *, NvU8 *, NvU64, NvU64);
NV_STATUS NV_API_CALL os_read_file(void *, NvU8 *, NvU64, NvU64);
NV_STATUS NV_API_CALL os_open_readonly_file(const char *, void **);
NV_STATUS NV_API_CALL os_open_and_read_file(const char *, NvU8 *, NvU64);
NvBool    NV_API_CALL os_is_nvswitch_present(void);
void      NV_API_CALL os_get_random_bytes(NvU8 *, NvU16);
NV_STATUS NV_API_CALL os_alloc_wait_queue(os_wait_queue **);
void      NV_API_CALL os_free_wait_queue(os_wait_queue *);
void      NV_API_CALL os_wait_uninterruptible(os_wait_queue *);
void      NV_API_CALL os_wait_interruptible(os_wait_queue *);
void      NV_API_CALL os_wake_up(os_wait_queue *);
nv_cap_t* NV_API_CALL os_nv_cap_init(const char *);
nv_cap_t* NV_API_CALL os_nv_cap_create_dir_entry(nv_cap_t *, const char *, int);
nv_cap_t* NV_API_CALL os_nv_cap_create_file_entry(nv_cap_t *, const char *, int);
void      NV_API_CALL os_nv_cap_destroy_entry(nv_cap_t *);
int       NV_API_CALL os_nv_cap_validate_and_dup_fd(const nv_cap_t *, int);
void      NV_API_CALL os_nv_cap_close_fd(int);

NV_STATUS NV_API_CALL os_get_tegra_platform(NvU32 *);

extern NvU32  os_page_size;
extern NvU64  os_page_mask;
extern NvU8   os_page_shift;
extern NvU32  os_sev_status;
extern NvBool os_sev_enabled;
extern NvBool os_dma_buf_enabled;

/*
 * ---------------------------------------------------------------------------
 *
 * Debug macros.
 *
 * ---------------------------------------------------------------------------
 */

#define NV_DBG_INFO       0x0
#define NV_DBG_SETUP      0x1
#define NV_DBG_USERERRORS 0x2
#define NV_DBG_WARNINGS   0x3
#define NV_DBG_ERRORS     0x4


void NV_API_CALL out_string(const char *str);
int  NV_API_CALL nv_printf(NvU32 debuglevel, const char *printf_format, ...);

#define NV_DEV_PRINTF(debuglevel, nv, format, ... ) \
    nv_printf(debuglevel, "NVRM: GPU " NV_PCI_DEV_FMT ": " format, NV_PCI_DEV_FMT_ARGS(nv), ## __VA_ARGS__)

#define NV_DEV_PRINTF_STATUS(debuglevel, nv, status, format, ... ) \
    nv_printf(debuglevel, "NVRM: GPU " NV_PCI_DEV_FMT ": " format " (0x%x)\n", NV_PCI_DEV_FMT_ARGS(nv), ## __VA_ARGS__, status)

/*
 * Fields for os_lock_user_pages flags parameter
 */
#define NV_LOCK_USER_PAGES_FLAGS_WRITE     0:0
#define NV_LOCK_USER_PAGES_FLAGS_WRITE_NO  0x00000000
#define NV_LOCK_USER_PAGES_FLAGS_WRITE_YES 0x00000001

// NV OS Tegra platform type defines
#define NV_OS_TEGRA_PLATFORM_SIM     0
#define NV_OS_TEGRA_PLATFORM_FPGA    1
#define NV_OS_TEGRA_PLATFORM_SILICON 2

#endif /* OS_INTERFACE_H */
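As a usage note, the per-device logging macros above prepend the GPU's PCI identity to the message. Illustrative invocations from hypothetical calling code (the message text and the nv/status variables are placeholders supplied by the caller, and NV_PCI_DEV_FMT comes from the driver's PCI headers):

    NV_DEV_PRINTF(NV_DBG_ERRORS, nv, "failed to map registers\n");
    NV_DEV_PRINTF_STATUS(NV_DBG_ERRORS, nv, status, "RM API call failed");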
@@ -1,88 +0,0 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */


/***************************** HW State Routines ***************************\
*                                                                           *
*         Fills in os specific function pointers for the Unix OS object.   *
*                                                                           *
\***************************************************************************/

#include <osfuncs.h>
#include <os/os.h>

static void initOSSpecificFunctionPointers(OBJOS *);
static void initMiscOSFunctionPointers(OBJOS *);
static void initUnixOSFunctionPointers(OBJOS *);
static void initOSSpecificProperties(OBJOS *);

void
osInitObjOS(OBJOS *pOS)
{
    initOSSpecificFunctionPointers(pOS);
    initOSSpecificProperties(pOS);
}

static void
initOSSpecificFunctionPointers(OBJOS *pOS)
{
    initMiscOSFunctionPointers(pOS);
    initUnixOSFunctionPointers(pOS);
}

static void
initMiscOSFunctionPointers(OBJOS *pOS)
{
    pOS->osQueueWorkItem = osQueueWorkItem;
    pOS->osQueueWorkItemWithFlags = osQueueWorkItemWithFlags;
    pOS->osQueueSystemWorkItem = osQueueSystemWorkItem;
}

static void
initUnixOSFunctionPointers(OBJOS *pOS)
{
#if defined(NVCPU_X86_64)
    pOS->osNv_rdcr4 = nv_rdcr4;
    pOS->osNv_cpuid = nv_cpuid;
#endif

    pOS->osCallACPI_DSM = osCallACPI_DSM;
    pOS->osCallACPI_DDC = osCallACPI_DDC;
    pOS->osCallACPI_NVHG_ROM = osCallACPI_NVHG_ROM;
    pOS->osCallACPI_DOD = osCallACPI_DOD;
    pOS->osCallACPI_MXDM = osCallACPI_MXDM;
    pOS->osCallACPI_MXDS = osCallACPI_MXDS;

    pOS->osDbgBreakpointEnabled = osDbgBreakpointEnabled;
}

static void
initOSSpecificProperties
(
    OBJOS *pOS
)
{
    pOS->setProperty(pOS, PDB_PROP_OS_ONDEMAND_VBLANK_CONTROL_ENABLE_DEFAULT, NV_TRUE);
    pOS->setProperty(pOS, PDB_PROP_OS_CACHED_MEMORY_MAPPINGS_FOR_ACPI_TABLE, NV_TRUE);
    pOS->setProperty(pOS, PDB_PROP_OS_LIMIT_GPU_RESET, NV_TRUE);
}
@@ -1,659 +0,0 @@
|
||||
#define NVOC_BINARY_API_H_PRIVATE_ACCESS_ALLOWED
|
||||
#include "nvoc/runtime.h"
|
||||
#include "nvoc/rtti.h"
|
||||
#include "nvtypes.h"
|
||||
#include "nvport/nvport.h"
|
||||
#include "nvport/inline/util_valist.h"
|
||||
#include "utils/nvassert.h"
|
||||
#include "g_binary_api_nvoc.h"
|
||||
|
||||
#ifdef DEBUG
|
||||
char __nvoc_class_id_uniqueness_check_0xb7a47c = 1;
|
||||
#endif
|
||||
|
||||
extern const struct NVOC_CLASS_DEF __nvoc_class_def_BinaryApi;
|
||||
|
||||
extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;
|
||||
|
||||
extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource;
|
||||
|
||||
extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon;
|
||||
|
||||
extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource;
|
||||
|
||||
extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource;
|
||||
|
||||
void __nvoc_init_BinaryApi(BinaryApi*);
|
||||
void __nvoc_init_funcTable_BinaryApi(BinaryApi*);
|
||||
NV_STATUS __nvoc_ctor_BinaryApi(BinaryApi*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams);
|
||||
void __nvoc_init_dataField_BinaryApi(BinaryApi*);
|
||||
void __nvoc_dtor_BinaryApi(BinaryApi*);
|
||||
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_BinaryApi;
|
||||
|
||||
static const struct NVOC_RTTI __nvoc_rtti_BinaryApi_BinaryApi = {
|
||||
/*pClassDef=*/ &__nvoc_class_def_BinaryApi,
|
||||
/*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_BinaryApi,
|
||||
/*offset=*/ 0,
|
||||
};
|
||||
|
||||
static const struct NVOC_RTTI __nvoc_rtti_BinaryApi_Object = {
|
||||
/*pClassDef=*/ &__nvoc_class_def_Object,
|
||||
/*dtor=*/ &__nvoc_destructFromBase,
|
||||
/*offset=*/ NV_OFFSETOF(BinaryApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object),
|
||||
};
|
||||
|
||||
static const struct NVOC_RTTI __nvoc_rtti_BinaryApi_RsResource = {
|
||||
/*pClassDef=*/ &__nvoc_class_def_RsResource,
|
||||
/*dtor=*/ &__nvoc_destructFromBase,
|
||||
/*offset=*/ NV_OFFSETOF(BinaryApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource),
|
||||
};
|
||||
|
||||
static const struct NVOC_RTTI __nvoc_rtti_BinaryApi_RmResourceCommon = {
|
||||
/*pClassDef=*/ &__nvoc_class_def_RmResourceCommon,
|
||||
/*dtor=*/ &__nvoc_destructFromBase,
|
||||
/*offset=*/ NV_OFFSETOF(BinaryApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon),
|
||||
};
|
||||
|
||||
static const struct NVOC_RTTI __nvoc_rtti_BinaryApi_RmResource = {
|
||||
/*pClassDef=*/ &__nvoc_class_def_RmResource,
|
||||
/*dtor=*/ &__nvoc_destructFromBase,
|
||||
/*offset=*/ NV_OFFSETOF(BinaryApi, __nvoc_base_GpuResource.__nvoc_base_RmResource),
|
||||
};
|
||||
|
||||
static const struct NVOC_RTTI __nvoc_rtti_BinaryApi_GpuResource = {
|
||||
/*pClassDef=*/ &__nvoc_class_def_GpuResource,
|
||||
/*dtor=*/ &__nvoc_destructFromBase,
|
||||
/*offset=*/ NV_OFFSETOF(BinaryApi, __nvoc_base_GpuResource),
|
||||
};
|
||||
|
||||
static const struct NVOC_CASTINFO __nvoc_castinfo_BinaryApi = {
|
||||
/*numRelatives=*/ 6,
|
||||
/*relatives=*/ {
|
||||
&__nvoc_rtti_BinaryApi_BinaryApi,
|
||||
&__nvoc_rtti_BinaryApi_GpuResource,
|
||||
&__nvoc_rtti_BinaryApi_RmResource,
|
||||
&__nvoc_rtti_BinaryApi_RmResourceCommon,
|
||||
&__nvoc_rtti_BinaryApi_RsResource,
|
||||
&__nvoc_rtti_BinaryApi_Object,
|
||||
},
|
||||
};
|
||||
|
||||
const struct NVOC_CLASS_DEF __nvoc_class_def_BinaryApi =
|
||||
{
|
||||
/*classInfo=*/ {
|
||||
/*size=*/ sizeof(BinaryApi),
|
||||
/*classId=*/ classId(BinaryApi),
|
||||
/*providerId=*/ &__nvoc_rtti_provider,
|
||||
#if NV_PRINTF_STRINGS_ALLOWED
|
||||
/*name=*/ "BinaryApi",
|
||||
#endif
|
||||
},
|
||||
/*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_BinaryApi,
|
||||
/*pCastInfo=*/ &__nvoc_castinfo_BinaryApi,
|
||||
/*pExportInfo=*/ &__nvoc_export_info_BinaryApi
|
||||
};
|
||||
|
||||
static NV_STATUS __nvoc_thunk_BinaryApi_gpuresControl(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return binapiControl((struct BinaryApi *)(((unsigned char *)pResource) - __nvoc_rtti_BinaryApi_GpuResource.offset), pCallContext, pParams);
}

static NvBool __nvoc_thunk_GpuResource_binapiShareCallback(struct BinaryApi *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
    return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_BinaryApi_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy);
}

static NV_STATUS __nvoc_thunk_GpuResource_binapiUnmap(struct BinaryApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) {
    return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_BinaryApi_GpuResource.offset), pCallContext, pCpuMapping);
}

static NV_STATUS __nvoc_thunk_RmResource_binapiGetMemInterMapParams(struct BinaryApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
    return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_BinaryApi_RmResource.offset), pParams);
}

static NV_STATUS __nvoc_thunk_RmResource_binapiGetMemoryMappingDescriptor(struct BinaryApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
    return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_BinaryApi_RmResource.offset), ppMemDesc);
}

static NV_STATUS __nvoc_thunk_GpuResource_binapiGetMapAddrSpace(struct BinaryApi *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) {
    return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_BinaryApi_GpuResource.offset), pCallContext, mapFlags, pAddrSpace);
}

static NvHandle __nvoc_thunk_GpuResource_binapiGetInternalObjectHandle(struct BinaryApi *pGpuResource) {
    return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_BinaryApi_GpuResource.offset));
}

static NV_STATUS __nvoc_thunk_RsResource_binapiControlFilter(struct BinaryApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApi_RsResource.offset), pCallContext, pParams);
}

static void __nvoc_thunk_RsResource_binapiAddAdditionalDependants(struct RsClient *pClient, struct BinaryApi *pResource, RsResourceRef *pReference) {
    resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApi_RsResource.offset), pReference);
}

static NvU32 __nvoc_thunk_RsResource_binapiGetRefCount(struct BinaryApi *pResource) {
    return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApi_RsResource.offset));
}

static NV_STATUS __nvoc_thunk_RmResource_binapiCheckMemInterUnmap(struct BinaryApi *pRmResource, NvBool bSubdeviceHandleProvided) {
    return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_BinaryApi_RmResource.offset), bSubdeviceHandleProvided);
}

static NV_STATUS __nvoc_thunk_RsResource_binapiMapTo(struct BinaryApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
    return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApi_RsResource.offset), pParams);
}

static NV_STATUS __nvoc_thunk_RmResource_binapiControl_Prologue(struct BinaryApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApi_RmResource.offset), pCallContext, pParams);
}

static NV_STATUS __nvoc_thunk_GpuResource_binapiGetRegBaseOffsetAndSize(struct BinaryApi *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) {
    return gpuresGetRegBaseOffsetAndSize((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_BinaryApi_GpuResource.offset), pGpu, pOffset, pSize);
}

static NvBool __nvoc_thunk_RsResource_binapiCanCopy(struct BinaryApi *pResource) {
    return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApi_RsResource.offset));
}

static NV_STATUS __nvoc_thunk_GpuResource_binapiInternalControlForward(struct BinaryApi *pGpuResource, NvU32 command, void *pParams, NvU32 size) {
    return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_BinaryApi_GpuResource.offset), command, pParams, size);
}

static void __nvoc_thunk_RsResource_binapiPreDestruct(struct BinaryApi *pResource) {
    resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApi_RsResource.offset));
}

static NV_STATUS __nvoc_thunk_RsResource_binapiUnmapFrom(struct BinaryApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
    return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApi_RsResource.offset), pParams);
}

static void __nvoc_thunk_RmResource_binapiControl_Epilogue(struct BinaryApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApi_RmResource.offset), pCallContext, pParams);
}

static NV_STATUS __nvoc_thunk_RsResource_binapiControlLookup(struct BinaryApi *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) {
    return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApi_RsResource.offset), pParams, ppEntry);
}

static NV_STATUS __nvoc_thunk_GpuResource_binapiMap(struct BinaryApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) {
    return gpuresMap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_BinaryApi_GpuResource.offset), pCallContext, pParams, pCpuMapping);
}

static NvBool __nvoc_thunk_RmResource_binapiAccessCallback(struct BinaryApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
    return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApi_RmResource.offset), pInvokingClient, pAllocParams, accessRight);
}
const struct NVOC_EXPORT_INFO __nvoc_export_info_BinaryApi =
{
    /*numEntries=*/ 0,
    /*pExportEntries=*/ 0
};

void __nvoc_dtor_GpuResource(GpuResource*);
void __nvoc_dtor_BinaryApi(BinaryApi *pThis) {
    __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource);
    PORT_UNREFERENCED_VARIABLE(pThis);
}

void __nvoc_init_dataField_BinaryApi(BinaryApi *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);
}

NV_STATUS __nvoc_ctor_GpuResource(GpuResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
NV_STATUS __nvoc_ctor_BinaryApi(BinaryApi *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
    NV_STATUS status = NV_OK;
    status = __nvoc_ctor_GpuResource(&pThis->__nvoc_base_GpuResource, arg_pCallContext, arg_pParams);
    if (status != NV_OK) goto __nvoc_ctor_BinaryApi_fail_GpuResource;
    __nvoc_init_dataField_BinaryApi(pThis);

    status = __nvoc_binapiConstruct(pThis, arg_pCallContext, arg_pParams);
    if (status != NV_OK) goto __nvoc_ctor_BinaryApi_fail__init;
    goto __nvoc_ctor_BinaryApi_exit; // Success

__nvoc_ctor_BinaryApi_fail__init:
    __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource);
__nvoc_ctor_BinaryApi_fail_GpuResource:
__nvoc_ctor_BinaryApi_exit:

    return status;
}
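/*
 * The constructor above uses NVOC's standard goto-based unwind: each base
 * class constructed so far is destroyed in reverse order if a later step
 * fails, so a failed __nvoc_binapiConstruct() tears down the GpuResource
 * base before the error status is returned to the caller.
 */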
static void __nvoc_init_funcTable_BinaryApi_1(BinaryApi *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);

    pThis->__binapiControl__ = &binapiControl_IMPL;

    pThis->__nvoc_base_GpuResource.__gpuresControl__ = &__nvoc_thunk_BinaryApi_gpuresControl;

    pThis->__binapiShareCallback__ = &__nvoc_thunk_GpuResource_binapiShareCallback;

    pThis->__binapiUnmap__ = &__nvoc_thunk_GpuResource_binapiUnmap;

    pThis->__binapiGetMemInterMapParams__ = &__nvoc_thunk_RmResource_binapiGetMemInterMapParams;

    pThis->__binapiGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_binapiGetMemoryMappingDescriptor;

    pThis->__binapiGetMapAddrSpace__ = &__nvoc_thunk_GpuResource_binapiGetMapAddrSpace;

    pThis->__binapiGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_binapiGetInternalObjectHandle;

    pThis->__binapiControlFilter__ = &__nvoc_thunk_RsResource_binapiControlFilter;

    pThis->__binapiAddAdditionalDependants__ = &__nvoc_thunk_RsResource_binapiAddAdditionalDependants;

    pThis->__binapiGetRefCount__ = &__nvoc_thunk_RsResource_binapiGetRefCount;

    pThis->__binapiCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_binapiCheckMemInterUnmap;

    pThis->__binapiMapTo__ = &__nvoc_thunk_RsResource_binapiMapTo;

    pThis->__binapiControl_Prologue__ = &__nvoc_thunk_RmResource_binapiControl_Prologue;

    pThis->__binapiGetRegBaseOffsetAndSize__ = &__nvoc_thunk_GpuResource_binapiGetRegBaseOffsetAndSize;

    pThis->__binapiCanCopy__ = &__nvoc_thunk_RsResource_binapiCanCopy;

    pThis->__binapiInternalControlForward__ = &__nvoc_thunk_GpuResource_binapiInternalControlForward;

    pThis->__binapiPreDestruct__ = &__nvoc_thunk_RsResource_binapiPreDestruct;

    pThis->__binapiUnmapFrom__ = &__nvoc_thunk_RsResource_binapiUnmapFrom;

    pThis->__binapiControl_Epilogue__ = &__nvoc_thunk_RmResource_binapiControl_Epilogue;

    pThis->__binapiControlLookup__ = &__nvoc_thunk_RsResource_binapiControlLookup;

    pThis->__binapiMap__ = &__nvoc_thunk_GpuResource_binapiMap;

    pThis->__binapiAccessCallback__ = &__nvoc_thunk_RmResource_binapiAccessCallback;
}

void __nvoc_init_funcTable_BinaryApi(BinaryApi *pThis) {
    __nvoc_init_funcTable_BinaryApi_1(pThis);
}
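/*
 * Unlike a C++ vtable, NVOC stores each virtual function pointer directly in
 * the object instance, so __nvoc_init_funcTable_BinaryApi() is the
 * per-object equivalent of installing a vtable. binapiControl_IMPL is the
 * only method BinaryApi implements itself; every other slot is filled with a
 * thunk that forwards to the inherited base-class implementation.
 */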
void __nvoc_init_GpuResource(GpuResource*);
void __nvoc_init_BinaryApi(BinaryApi *pThis) {
    pThis->__nvoc_pbase_BinaryApi = pThis;
    pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object;
    pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource;
    pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon;
    pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource;
    pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_GpuResource;
    __nvoc_init_GpuResource(&pThis->__nvoc_base_GpuResource);
    __nvoc_init_funcTable_BinaryApi(pThis);
}
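/*
 * The __nvoc_pbase_* pointers cached above let staticCast() resolve any
 * ancestor with a single member load and no offset arithmetic at the call
 * site; for example, the __staticCast_BinaryApi macro in the header simply
 * reads (pThis)->__nvoc_pbase_BinaryApi.
 */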
NV_STATUS __nvoc_objCreate_BinaryApi(BinaryApi **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
    NV_STATUS status;
    Object *pParentObj;
    BinaryApi *pThis;

    pThis = portMemAllocNonPaged(sizeof(BinaryApi));
    if (pThis == NULL) return NV_ERR_NO_MEMORY;

    portMemSet(pThis, 0, sizeof(BinaryApi));

    __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_BinaryApi);

    if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
    {
        pParentObj = dynamicCast(pParent, Object);
        objAddChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object);
    }
    else
    {
        pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL;
    }

    __nvoc_init_BinaryApi(pThis);
    status = __nvoc_ctor_BinaryApi(pThis, arg_pCallContext, arg_pParams);
    if (status != NV_OK) goto __nvoc_objCreate_BinaryApi_cleanup;

    *ppThis = pThis;
    return NV_OK;

__nvoc_objCreate_BinaryApi_cleanup:
    // do not call destructors here since the constructor already called them
    portMemFree(pThis);
    return status;
}
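/*
 * Creation order above is fixed: allocate and zero the instance, stamp the
 * RTTI, attach to the parent Object, initialize the pbase pointers and func
 * table, then run the constructor chain. On constructor failure only the raw
 * allocation is freed, because __nvoc_ctor_BinaryApi() has already unwound
 * its bases.
 */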
NV_STATUS __nvoc_objCreateDynamic_BinaryApi(BinaryApi **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
    NV_STATUS status;
    struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *);
    struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *);

    status = __nvoc_objCreate_BinaryApi(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams);

    return status;
}

#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0x1c0579 = 1;
#endif
extern const struct NVOC_CLASS_DEF __nvoc_class_def_BinaryApiPrivileged;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_BinaryApi;

void __nvoc_init_BinaryApiPrivileged(BinaryApiPrivileged*);
void __nvoc_init_funcTable_BinaryApiPrivileged(BinaryApiPrivileged*);
NV_STATUS __nvoc_ctor_BinaryApiPrivileged(BinaryApiPrivileged*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams);
void __nvoc_init_dataField_BinaryApiPrivileged(BinaryApiPrivileged*);
void __nvoc_dtor_BinaryApiPrivileged(BinaryApiPrivileged*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_BinaryApiPrivileged;

static const struct NVOC_RTTI __nvoc_rtti_BinaryApiPrivileged_BinaryApiPrivileged = {
    /*pClassDef=*/ &__nvoc_class_def_BinaryApiPrivileged,
    /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_BinaryApiPrivileged,
    /*offset=*/ 0,
};

static const struct NVOC_RTTI __nvoc_rtti_BinaryApiPrivileged_Object = {
    /*pClassDef=*/ &__nvoc_class_def_Object,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(BinaryApiPrivileged, __nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object),
};

static const struct NVOC_RTTI __nvoc_rtti_BinaryApiPrivileged_RsResource = {
    /*pClassDef=*/ &__nvoc_class_def_RsResource,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(BinaryApiPrivileged, __nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource),
};

static const struct NVOC_RTTI __nvoc_rtti_BinaryApiPrivileged_RmResourceCommon = {
    /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(BinaryApiPrivileged, __nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon),
};

static const struct NVOC_RTTI __nvoc_rtti_BinaryApiPrivileged_RmResource = {
    /*pClassDef=*/ &__nvoc_class_def_RmResource,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(BinaryApiPrivileged, __nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource),
};

static const struct NVOC_RTTI __nvoc_rtti_BinaryApiPrivileged_GpuResource = {
    /*pClassDef=*/ &__nvoc_class_def_GpuResource,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(BinaryApiPrivileged, __nvoc_base_BinaryApi.__nvoc_base_GpuResource),
};

static const struct NVOC_RTTI __nvoc_rtti_BinaryApiPrivileged_BinaryApi = {
    /*pClassDef=*/ &__nvoc_class_def_BinaryApi,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(BinaryApiPrivileged, __nvoc_base_BinaryApi),
};

static const struct NVOC_CASTINFO __nvoc_castinfo_BinaryApiPrivileged = {
    /*numRelatives=*/ 7,
    /*relatives=*/ {
        &__nvoc_rtti_BinaryApiPrivileged_BinaryApiPrivileged,
        &__nvoc_rtti_BinaryApiPrivileged_BinaryApi,
        &__nvoc_rtti_BinaryApiPrivileged_GpuResource,
        &__nvoc_rtti_BinaryApiPrivileged_RmResource,
        &__nvoc_rtti_BinaryApiPrivileged_RmResourceCommon,
        &__nvoc_rtti_BinaryApiPrivileged_RsResource,
        &__nvoc_rtti_BinaryApiPrivileged_Object,
    },
};

const struct NVOC_CLASS_DEF __nvoc_class_def_BinaryApiPrivileged =
{
    /*classInfo=*/ {
        /*size=*/ sizeof(BinaryApiPrivileged),
        /*classId=*/ classId(BinaryApiPrivileged),
        /*providerId=*/ &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
        /*name=*/ "BinaryApiPrivileged",
#endif
    },
    /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_BinaryApiPrivileged,
    /*pCastInfo=*/ &__nvoc_castinfo_BinaryApiPrivileged,
    /*pExportInfo=*/ &__nvoc_export_info_BinaryApiPrivileged
};

static NV_STATUS __nvoc_thunk_BinaryApiPrivileged_binapiControl(struct BinaryApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return binapiprivControl((struct BinaryApiPrivileged *)(((unsigned char *)pResource) - __nvoc_rtti_BinaryApiPrivileged_BinaryApi.offset), pCallContext, pParams);
}
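/*
 * __nvoc_thunk_BinaryApiPrivileged_binapiControl is the override hook: it is
 * installed into the embedded BinaryApi's __binapiControl__ slot by
 * __nvoc_init_funcTable_BinaryApiPrivileged_1() below, so a control call
 * made through a BinaryApi pointer downcasts (subtracts the offset) and
 * lands in the privileged implementation instead of binapiControl_IMPL.
 */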
static NvBool __nvoc_thunk_GpuResource_binapiprivShareCallback(struct BinaryApiPrivileged *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
    return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_BinaryApiPrivileged_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy);
}

static NV_STATUS __nvoc_thunk_GpuResource_binapiprivUnmap(struct BinaryApiPrivileged *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) {
    return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_BinaryApiPrivileged_GpuResource.offset), pCallContext, pCpuMapping);
}

static NV_STATUS __nvoc_thunk_RmResource_binapiprivGetMemInterMapParams(struct BinaryApiPrivileged *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
    return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_BinaryApiPrivileged_RmResource.offset), pParams);
}

static NV_STATUS __nvoc_thunk_RmResource_binapiprivGetMemoryMappingDescriptor(struct BinaryApiPrivileged *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
    return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_BinaryApiPrivileged_RmResource.offset), ppMemDesc);
}

static NV_STATUS __nvoc_thunk_GpuResource_binapiprivGetMapAddrSpace(struct BinaryApiPrivileged *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) {
    return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_BinaryApiPrivileged_GpuResource.offset), pCallContext, mapFlags, pAddrSpace);
}

static NvHandle __nvoc_thunk_GpuResource_binapiprivGetInternalObjectHandle(struct BinaryApiPrivileged *pGpuResource) {
    return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_BinaryApiPrivileged_GpuResource.offset));
}

static NV_STATUS __nvoc_thunk_RsResource_binapiprivControlFilter(struct BinaryApiPrivileged *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApiPrivileged_RsResource.offset), pCallContext, pParams);
}

static void __nvoc_thunk_RsResource_binapiprivAddAdditionalDependants(struct RsClient *pClient, struct BinaryApiPrivileged *pResource, RsResourceRef *pReference) {
    resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApiPrivileged_RsResource.offset), pReference);
}

static NvU32 __nvoc_thunk_RsResource_binapiprivGetRefCount(struct BinaryApiPrivileged *pResource) {
    return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApiPrivileged_RsResource.offset));
}

static NV_STATUS __nvoc_thunk_RmResource_binapiprivCheckMemInterUnmap(struct BinaryApiPrivileged *pRmResource, NvBool bSubdeviceHandleProvided) {
    return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_BinaryApiPrivileged_RmResource.offset), bSubdeviceHandleProvided);
}

static NV_STATUS __nvoc_thunk_RsResource_binapiprivMapTo(struct BinaryApiPrivileged *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
    return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApiPrivileged_RsResource.offset), pParams);
}

static NV_STATUS __nvoc_thunk_RmResource_binapiprivControl_Prologue(struct BinaryApiPrivileged *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApiPrivileged_RmResource.offset), pCallContext, pParams);
}

static NV_STATUS __nvoc_thunk_GpuResource_binapiprivGetRegBaseOffsetAndSize(struct BinaryApiPrivileged *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) {
    return gpuresGetRegBaseOffsetAndSize((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_BinaryApiPrivileged_GpuResource.offset), pGpu, pOffset, pSize);
}

static NvBool __nvoc_thunk_RsResource_binapiprivCanCopy(struct BinaryApiPrivileged *pResource) {
    return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApiPrivileged_RsResource.offset));
}

static NV_STATUS __nvoc_thunk_GpuResource_binapiprivInternalControlForward(struct BinaryApiPrivileged *pGpuResource, NvU32 command, void *pParams, NvU32 size) {
    return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_BinaryApiPrivileged_GpuResource.offset), command, pParams, size);
}

static void __nvoc_thunk_RsResource_binapiprivPreDestruct(struct BinaryApiPrivileged *pResource) {
    resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApiPrivileged_RsResource.offset));
}

static NV_STATUS __nvoc_thunk_RsResource_binapiprivUnmapFrom(struct BinaryApiPrivileged *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
    return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApiPrivileged_RsResource.offset), pParams);
}

static void __nvoc_thunk_RmResource_binapiprivControl_Epilogue(struct BinaryApiPrivileged *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApiPrivileged_RmResource.offset), pCallContext, pParams);
}

static NV_STATUS __nvoc_thunk_RsResource_binapiprivControlLookup(struct BinaryApiPrivileged *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) {
    return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApiPrivileged_RsResource.offset), pParams, ppEntry);
}

static NV_STATUS __nvoc_thunk_GpuResource_binapiprivMap(struct BinaryApiPrivileged *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) {
    return gpuresMap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_BinaryApiPrivileged_GpuResource.offset), pCallContext, pParams, pCpuMapping);
}

static NvBool __nvoc_thunk_RmResource_binapiprivAccessCallback(struct BinaryApiPrivileged *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
    return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_BinaryApiPrivileged_RmResource.offset), pInvokingClient, pAllocParams, accessRight);
}

const struct NVOC_EXPORT_INFO __nvoc_export_info_BinaryApiPrivileged =
{
    /*numEntries=*/ 0,
    /*pExportEntries=*/ 0
};
void __nvoc_dtor_BinaryApi(BinaryApi*);
void __nvoc_dtor_BinaryApiPrivileged(BinaryApiPrivileged *pThis) {
    __nvoc_dtor_BinaryApi(&pThis->__nvoc_base_BinaryApi);
    PORT_UNREFERENCED_VARIABLE(pThis);
}

void __nvoc_init_dataField_BinaryApiPrivileged(BinaryApiPrivileged *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);
}

NV_STATUS __nvoc_ctor_BinaryApi(BinaryApi* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
NV_STATUS __nvoc_ctor_BinaryApiPrivileged(BinaryApiPrivileged *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
    NV_STATUS status = NV_OK;
    status = __nvoc_ctor_BinaryApi(&pThis->__nvoc_base_BinaryApi, arg_pCallContext, arg_pParams);
    if (status != NV_OK) goto __nvoc_ctor_BinaryApiPrivileged_fail_BinaryApi;
    __nvoc_init_dataField_BinaryApiPrivileged(pThis);

    status = __nvoc_binapiprivConstruct(pThis, arg_pCallContext, arg_pParams);
    if (status != NV_OK) goto __nvoc_ctor_BinaryApiPrivileged_fail__init;
    goto __nvoc_ctor_BinaryApiPrivileged_exit; // Success

__nvoc_ctor_BinaryApiPrivileged_fail__init:
    __nvoc_dtor_BinaryApi(&pThis->__nvoc_base_BinaryApi);
__nvoc_ctor_BinaryApiPrivileged_fail_BinaryApi:
__nvoc_ctor_BinaryApiPrivileged_exit:

    return status;
}
static void __nvoc_init_funcTable_BinaryApiPrivileged_1(BinaryApiPrivileged *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);

    pThis->__binapiprivControl__ = &binapiprivControl_IMPL;

    pThis->__nvoc_base_BinaryApi.__binapiControl__ = &__nvoc_thunk_BinaryApiPrivileged_binapiControl;

    pThis->__binapiprivShareCallback__ = &__nvoc_thunk_GpuResource_binapiprivShareCallback;

    pThis->__binapiprivUnmap__ = &__nvoc_thunk_GpuResource_binapiprivUnmap;

    pThis->__binapiprivGetMemInterMapParams__ = &__nvoc_thunk_RmResource_binapiprivGetMemInterMapParams;

    pThis->__binapiprivGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_binapiprivGetMemoryMappingDescriptor;

    pThis->__binapiprivGetMapAddrSpace__ = &__nvoc_thunk_GpuResource_binapiprivGetMapAddrSpace;

    pThis->__binapiprivGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_binapiprivGetInternalObjectHandle;

    pThis->__binapiprivControlFilter__ = &__nvoc_thunk_RsResource_binapiprivControlFilter;

    pThis->__binapiprivAddAdditionalDependants__ = &__nvoc_thunk_RsResource_binapiprivAddAdditionalDependants;

    pThis->__binapiprivGetRefCount__ = &__nvoc_thunk_RsResource_binapiprivGetRefCount;

    pThis->__binapiprivCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_binapiprivCheckMemInterUnmap;

    pThis->__binapiprivMapTo__ = &__nvoc_thunk_RsResource_binapiprivMapTo;

    pThis->__binapiprivControl_Prologue__ = &__nvoc_thunk_RmResource_binapiprivControl_Prologue;

    pThis->__binapiprivGetRegBaseOffsetAndSize__ = &__nvoc_thunk_GpuResource_binapiprivGetRegBaseOffsetAndSize;

    pThis->__binapiprivCanCopy__ = &__nvoc_thunk_RsResource_binapiprivCanCopy;

    pThis->__binapiprivInternalControlForward__ = &__nvoc_thunk_GpuResource_binapiprivInternalControlForward;

    pThis->__binapiprivPreDestruct__ = &__nvoc_thunk_RsResource_binapiprivPreDestruct;

    pThis->__binapiprivUnmapFrom__ = &__nvoc_thunk_RsResource_binapiprivUnmapFrom;

    pThis->__binapiprivControl_Epilogue__ = &__nvoc_thunk_RmResource_binapiprivControl_Epilogue;

    pThis->__binapiprivControlLookup__ = &__nvoc_thunk_RsResource_binapiprivControlLookup;

    pThis->__binapiprivMap__ = &__nvoc_thunk_GpuResource_binapiprivMap;

    pThis->__binapiprivAccessCallback__ = &__nvoc_thunk_RmResource_binapiprivAccessCallback;
}

void __nvoc_init_funcTable_BinaryApiPrivileged(BinaryApiPrivileged *pThis) {
    __nvoc_init_funcTable_BinaryApiPrivileged_1(pThis);
}
void __nvoc_init_BinaryApi(BinaryApi*);
void __nvoc_init_BinaryApiPrivileged(BinaryApiPrivileged *pThis) {
    pThis->__nvoc_pbase_BinaryApiPrivileged = pThis;
    pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object;
    pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource;
    pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon;
    pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource;
    pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_BinaryApi.__nvoc_base_GpuResource;
    pThis->__nvoc_pbase_BinaryApi = &pThis->__nvoc_base_BinaryApi;
    __nvoc_init_BinaryApi(&pThis->__nvoc_base_BinaryApi);
    __nvoc_init_funcTable_BinaryApiPrivileged(pThis);
}
NV_STATUS __nvoc_objCreate_BinaryApiPrivileged(BinaryApiPrivileged **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
    NV_STATUS status;
    Object *pParentObj;
    BinaryApiPrivileged *pThis;

    pThis = portMemAllocNonPaged(sizeof(BinaryApiPrivileged));
    if (pThis == NULL) return NV_ERR_NO_MEMORY;

    portMemSet(pThis, 0, sizeof(BinaryApiPrivileged));

    __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_BinaryApiPrivileged);

    if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
    {
        pParentObj = dynamicCast(pParent, Object);
        objAddChild(pParentObj, &pThis->__nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object);
    }
    else
    {
        pThis->__nvoc_base_BinaryApi.__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL;
    }

    __nvoc_init_BinaryApiPrivileged(pThis);
    status = __nvoc_ctor_BinaryApiPrivileged(pThis, arg_pCallContext, arg_pParams);
    if (status != NV_OK) goto __nvoc_objCreate_BinaryApiPrivileged_cleanup;

    *ppThis = pThis;
    return NV_OK;

__nvoc_objCreate_BinaryApiPrivileged_cleanup:
    // do not call destructors here since the constructor already called them
    portMemFree(pThis);
    return status;
}
NV_STATUS __nvoc_objCreateDynamic_BinaryApiPrivileged(BinaryApiPrivileged **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
    NV_STATUS status;
    struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *);
    struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *);

    status = __nvoc_objCreate_BinaryApiPrivileged(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams);

    return status;
}
@@ -1,416 +0,0 @@
#ifndef _G_BINARY_API_NVOC_H_
#define _G_BINARY_API_NVOC_H_
#include "nvoc/runtime.h"

#ifdef __cplusplus
extern "C" {
#endif

/*
 * SPDX-FileCopyrightText: Copyright (c) 2021-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "g_binary_api_nvoc.h"

#ifndef BINARY_API_H
#define BINARY_API_H

#include "core/core.h"
#include "rmapi/resource.h"
#include "gpu/gpu_resource.h"
#include "resserv/rs_resource.h"
#include "rmapi/control.h"

#ifdef NVOC_BINARY_API_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
struct BinaryApi {
    const struct NVOC_RTTI *__nvoc_rtti;
    struct GpuResource __nvoc_base_GpuResource;
    struct Object *__nvoc_pbase_Object;
    struct RsResource *__nvoc_pbase_RsResource;
    struct RmResourceCommon *__nvoc_pbase_RmResourceCommon;
    struct RmResource *__nvoc_pbase_RmResource;
    struct GpuResource *__nvoc_pbase_GpuResource;
    struct BinaryApi *__nvoc_pbase_BinaryApi;
    NV_STATUS (*__binapiControl__)(struct BinaryApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
    NvBool (*__binapiShareCallback__)(struct BinaryApi *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *);
    NV_STATUS (*__binapiUnmap__)(struct BinaryApi *, struct CALL_CONTEXT *, struct RsCpuMapping *);
    NV_STATUS (*__binapiGetMemInterMapParams__)(struct BinaryApi *, RMRES_MEM_INTER_MAP_PARAMS *);
    NV_STATUS (*__binapiGetMemoryMappingDescriptor__)(struct BinaryApi *, struct MEMORY_DESCRIPTOR **);
    NV_STATUS (*__binapiGetMapAddrSpace__)(struct BinaryApi *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *);
    NvHandle (*__binapiGetInternalObjectHandle__)(struct BinaryApi *);
    NV_STATUS (*__binapiControlFilter__)(struct BinaryApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
    void (*__binapiAddAdditionalDependants__)(struct RsClient *, struct BinaryApi *, RsResourceRef *);
    NvU32 (*__binapiGetRefCount__)(struct BinaryApi *);
    NV_STATUS (*__binapiCheckMemInterUnmap__)(struct BinaryApi *, NvBool);
    NV_STATUS (*__binapiMapTo__)(struct BinaryApi *, RS_RES_MAP_TO_PARAMS *);
    NV_STATUS (*__binapiControl_Prologue__)(struct BinaryApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
    NV_STATUS (*__binapiGetRegBaseOffsetAndSize__)(struct BinaryApi *, struct OBJGPU *, NvU32 *, NvU32 *);
    NvBool (*__binapiCanCopy__)(struct BinaryApi *);
    NV_STATUS (*__binapiInternalControlForward__)(struct BinaryApi *, NvU32, void *, NvU32);
    void (*__binapiPreDestruct__)(struct BinaryApi *);
    NV_STATUS (*__binapiUnmapFrom__)(struct BinaryApi *, RS_RES_UNMAP_FROM_PARAMS *);
    void (*__binapiControl_Epilogue__)(struct BinaryApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
    NV_STATUS (*__binapiControlLookup__)(struct BinaryApi *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **);
    NV_STATUS (*__binapiMap__)(struct BinaryApi *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *);
    NvBool (*__binapiAccessCallback__)(struct BinaryApi *, struct RsClient *, void *, RsAccessRight);
};
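/*
 * Layout note: __nvoc_rtti stays the first member so the runtime can treat
 * any NVOC object as a Dynamic, and __nvoc_base_GpuResource embeds the full
 * base-class instance (not a pointer), which is what makes the offset-based
 * thunk casts in the generated .c file valid.
 */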
#ifndef __NVOC_CLASS_BinaryApi_TYPEDEF__
#define __NVOC_CLASS_BinaryApi_TYPEDEF__
typedef struct BinaryApi BinaryApi;
#endif /* __NVOC_CLASS_BinaryApi_TYPEDEF__ */

#ifndef __nvoc_class_id_BinaryApi
#define __nvoc_class_id_BinaryApi 0xb7a47c
#endif /* __nvoc_class_id_BinaryApi */

extern const struct NVOC_CLASS_DEF __nvoc_class_def_BinaryApi;

#define __staticCast_BinaryApi(pThis) \
    ((pThis)->__nvoc_pbase_BinaryApi)

#ifdef __nvoc_binary_api_h_disabled
#define __dynamicCast_BinaryApi(pThis) ((BinaryApi*)NULL)
#else //__nvoc_binary_api_h_disabled
#define __dynamicCast_BinaryApi(pThis) \
    ((BinaryApi*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(BinaryApi)))
#endif //__nvoc_binary_api_h_disabled


NV_STATUS __nvoc_objCreateDynamic_BinaryApi(BinaryApi**, Dynamic*, NvU32, va_list);

NV_STATUS __nvoc_objCreate_BinaryApi(BinaryApi**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams);
#define __objCreate_BinaryApi(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \
    __nvoc_objCreate_BinaryApi((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams)
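/*
 * A minimal usage sketch of the creation macro together with the binapi*
 * dispatch wrappers defined below (the caller-side variable names are
 * illustrative, not from this file):
 *
 *     BinaryApi *pBinApi = NULL;
 *     NV_STATUS status = __objCreate_BinaryApi(&pBinApi, pParent, 0,
 *                                              pCallContext, pAllocParams);
 *     if (status == NV_OK)
 *         status = binapiControl(pBinApi, pCallContext, pControlParams);
 */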
#define binapiControl(pResource, pCallContext, pParams) binapiControl_DISPATCH(pResource, pCallContext, pParams)
#define binapiShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) binapiShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy)
#define binapiUnmap(pGpuResource, pCallContext, pCpuMapping) binapiUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping)
#define binapiGetMemInterMapParams(pRmResource, pParams) binapiGetMemInterMapParams_DISPATCH(pRmResource, pParams)
#define binapiGetMemoryMappingDescriptor(pRmResource, ppMemDesc) binapiGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc)
#define binapiGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) binapiGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace)
#define binapiGetInternalObjectHandle(pGpuResource) binapiGetInternalObjectHandle_DISPATCH(pGpuResource)
#define binapiControlFilter(pResource, pCallContext, pParams) binapiControlFilter_DISPATCH(pResource, pCallContext, pParams)
#define binapiAddAdditionalDependants(pClient, pResource, pReference) binapiAddAdditionalDependants_DISPATCH(pClient, pResource, pReference)
#define binapiGetRefCount(pResource) binapiGetRefCount_DISPATCH(pResource)
#define binapiCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) binapiCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided)
#define binapiMapTo(pResource, pParams) binapiMapTo_DISPATCH(pResource, pParams)
#define binapiControl_Prologue(pResource, pCallContext, pParams) binapiControl_Prologue_DISPATCH(pResource, pCallContext, pParams)
#define binapiGetRegBaseOffsetAndSize(pGpuResource, pGpu, pOffset, pSize) binapiGetRegBaseOffsetAndSize_DISPATCH(pGpuResource, pGpu, pOffset, pSize)
#define binapiCanCopy(pResource) binapiCanCopy_DISPATCH(pResource)
#define binapiInternalControlForward(pGpuResource, command, pParams, size) binapiInternalControlForward_DISPATCH(pGpuResource, command, pParams, size)
#define binapiPreDestruct(pResource) binapiPreDestruct_DISPATCH(pResource)
#define binapiUnmapFrom(pResource, pParams) binapiUnmapFrom_DISPATCH(pResource, pParams)
#define binapiControl_Epilogue(pResource, pCallContext, pParams) binapiControl_Epilogue_DISPATCH(pResource, pCallContext, pParams)
#define binapiControlLookup(pResource, pParams, ppEntry) binapiControlLookup_DISPATCH(pResource, pParams, ppEntry)
#define binapiMap(pGpuResource, pCallContext, pParams, pCpuMapping) binapiMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping)
#define binapiAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) binapiAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight)
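/*
 * Each binapi* macro above expands to the matching *_DISPATCH inline below,
 * which indirects through the per-object function pointer; a call such as
 * binapiControl(p, ctx, params) thus becomes
 * p->__binapiControl__(p, ctx, params) and honors any override installed by
 * a derived class like BinaryApiPrivileged.
 */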
NV_STATUS binapiControl_IMPL(struct BinaryApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams);

static inline NV_STATUS binapiControl_DISPATCH(struct BinaryApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return pResource->__binapiControl__(pResource, pCallContext, pParams);
}

static inline NvBool binapiShareCallback_DISPATCH(struct BinaryApi *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
    return pGpuResource->__binapiShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy);
}

static inline NV_STATUS binapiUnmap_DISPATCH(struct BinaryApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) {
    return pGpuResource->__binapiUnmap__(pGpuResource, pCallContext, pCpuMapping);
}

static inline NV_STATUS binapiGetMemInterMapParams_DISPATCH(struct BinaryApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
    return pRmResource->__binapiGetMemInterMapParams__(pRmResource, pParams);
}

static inline NV_STATUS binapiGetMemoryMappingDescriptor_DISPATCH(struct BinaryApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
    return pRmResource->__binapiGetMemoryMappingDescriptor__(pRmResource, ppMemDesc);
}

static inline NV_STATUS binapiGetMapAddrSpace_DISPATCH(struct BinaryApi *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) {
    return pGpuResource->__binapiGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace);
}

static inline NvHandle binapiGetInternalObjectHandle_DISPATCH(struct BinaryApi *pGpuResource) {
    return pGpuResource->__binapiGetInternalObjectHandle__(pGpuResource);
}

static inline NV_STATUS binapiControlFilter_DISPATCH(struct BinaryApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return pResource->__binapiControlFilter__(pResource, pCallContext, pParams);
}

static inline void binapiAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct BinaryApi *pResource, RsResourceRef *pReference) {
    pResource->__binapiAddAdditionalDependants__(pClient, pResource, pReference);
}

static inline NvU32 binapiGetRefCount_DISPATCH(struct BinaryApi *pResource) {
    return pResource->__binapiGetRefCount__(pResource);
}

static inline NV_STATUS binapiCheckMemInterUnmap_DISPATCH(struct BinaryApi *pRmResource, NvBool bSubdeviceHandleProvided) {
    return pRmResource->__binapiCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided);
}

static inline NV_STATUS binapiMapTo_DISPATCH(struct BinaryApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
    return pResource->__binapiMapTo__(pResource, pParams);
}

static inline NV_STATUS binapiControl_Prologue_DISPATCH(struct BinaryApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return pResource->__binapiControl_Prologue__(pResource, pCallContext, pParams);
}

static inline NV_STATUS binapiGetRegBaseOffsetAndSize_DISPATCH(struct BinaryApi *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) {
    return pGpuResource->__binapiGetRegBaseOffsetAndSize__(pGpuResource, pGpu, pOffset, pSize);
}

static inline NvBool binapiCanCopy_DISPATCH(struct BinaryApi *pResource) {
    return pResource->__binapiCanCopy__(pResource);
}

static inline NV_STATUS binapiInternalControlForward_DISPATCH(struct BinaryApi *pGpuResource, NvU32 command, void *pParams, NvU32 size) {
    return pGpuResource->__binapiInternalControlForward__(pGpuResource, command, pParams, size);
}

static inline void binapiPreDestruct_DISPATCH(struct BinaryApi *pResource) {
    pResource->__binapiPreDestruct__(pResource);
}

static inline NV_STATUS binapiUnmapFrom_DISPATCH(struct BinaryApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
    return pResource->__binapiUnmapFrom__(pResource, pParams);
}

static inline void binapiControl_Epilogue_DISPATCH(struct BinaryApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    pResource->__binapiControl_Epilogue__(pResource, pCallContext, pParams);
}

static inline NV_STATUS binapiControlLookup_DISPATCH(struct BinaryApi *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) {
    return pResource->__binapiControlLookup__(pResource, pParams, ppEntry);
}

static inline NV_STATUS binapiMap_DISPATCH(struct BinaryApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) {
    return pGpuResource->__binapiMap__(pGpuResource, pCallContext, pParams, pCpuMapping);
}

static inline NvBool binapiAccessCallback_DISPATCH(struct BinaryApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
    return pResource->__binapiAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight);
}

NV_STATUS binapiConstruct_IMPL(struct BinaryApi *arg_pResource, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams);
#define __nvoc_binapiConstruct(arg_pResource, arg_pCallContext, arg_pParams) binapiConstruct_IMPL(arg_pResource, arg_pCallContext, arg_pParams)
#undef PRIVATE_FIELD


#ifdef NVOC_BINARY_API_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
struct BinaryApiPrivileged {
    const struct NVOC_RTTI *__nvoc_rtti;
    struct BinaryApi __nvoc_base_BinaryApi;
    struct Object *__nvoc_pbase_Object;
    struct RsResource *__nvoc_pbase_RsResource;
    struct RmResourceCommon *__nvoc_pbase_RmResourceCommon;
    struct RmResource *__nvoc_pbase_RmResource;
    struct GpuResource *__nvoc_pbase_GpuResource;
    struct BinaryApi *__nvoc_pbase_BinaryApi;
    struct BinaryApiPrivileged *__nvoc_pbase_BinaryApiPrivileged;
    NV_STATUS (*__binapiprivControl__)(struct BinaryApiPrivileged *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
    NvBool (*__binapiprivShareCallback__)(struct BinaryApiPrivileged *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *);
    NV_STATUS (*__binapiprivUnmap__)(struct BinaryApiPrivileged *, struct CALL_CONTEXT *, struct RsCpuMapping *);
    NV_STATUS (*__binapiprivGetMemInterMapParams__)(struct BinaryApiPrivileged *, RMRES_MEM_INTER_MAP_PARAMS *);
    NV_STATUS (*__binapiprivGetMemoryMappingDescriptor__)(struct BinaryApiPrivileged *, struct MEMORY_DESCRIPTOR **);
    NV_STATUS (*__binapiprivGetMapAddrSpace__)(struct BinaryApiPrivileged *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *);
    NvHandle (*__binapiprivGetInternalObjectHandle__)(struct BinaryApiPrivileged *);
    NV_STATUS (*__binapiprivControlFilter__)(struct BinaryApiPrivileged *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
    void (*__binapiprivAddAdditionalDependants__)(struct RsClient *, struct BinaryApiPrivileged *, RsResourceRef *);
    NvU32 (*__binapiprivGetRefCount__)(struct BinaryApiPrivileged *);
    NV_STATUS (*__binapiprivCheckMemInterUnmap__)(struct BinaryApiPrivileged *, NvBool);
    NV_STATUS (*__binapiprivMapTo__)(struct BinaryApiPrivileged *, RS_RES_MAP_TO_PARAMS *);
    NV_STATUS (*__binapiprivControl_Prologue__)(struct BinaryApiPrivileged *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
    NV_STATUS (*__binapiprivGetRegBaseOffsetAndSize__)(struct BinaryApiPrivileged *, struct OBJGPU *, NvU32 *, NvU32 *);
    NvBool (*__binapiprivCanCopy__)(struct BinaryApiPrivileged *);
    NV_STATUS (*__binapiprivInternalControlForward__)(struct BinaryApiPrivileged *, NvU32, void *, NvU32);
    void (*__binapiprivPreDestruct__)(struct BinaryApiPrivileged *);
    NV_STATUS (*__binapiprivUnmapFrom__)(struct BinaryApiPrivileged *, RS_RES_UNMAP_FROM_PARAMS *);
    void (*__binapiprivControl_Epilogue__)(struct BinaryApiPrivileged *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
    NV_STATUS (*__binapiprivControlLookup__)(struct BinaryApiPrivileged *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **);
    NV_STATUS (*__binapiprivMap__)(struct BinaryApiPrivileged *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *);
    NvBool (*__binapiprivAccessCallback__)(struct BinaryApiPrivileged *, struct RsClient *, void *, RsAccessRight);
};
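/*
 * BinaryApiPrivileged embeds a full BinaryApi as its first base member and
 * repeats the ancestor pbase pointers alongside its own binapipriv* slots;
 * the only behavioral difference from BinaryApi is the overridden control
 * entry point (binapiprivControl).
 */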
#ifndef __NVOC_CLASS_BinaryApiPrivileged_TYPEDEF__
#define __NVOC_CLASS_BinaryApiPrivileged_TYPEDEF__
typedef struct BinaryApiPrivileged BinaryApiPrivileged;
#endif /* __NVOC_CLASS_BinaryApiPrivileged_TYPEDEF__ */

#ifndef __nvoc_class_id_BinaryApiPrivileged
#define __nvoc_class_id_BinaryApiPrivileged 0x1c0579
#endif /* __nvoc_class_id_BinaryApiPrivileged */

extern const struct NVOC_CLASS_DEF __nvoc_class_def_BinaryApiPrivileged;

#define __staticCast_BinaryApiPrivileged(pThis) \
    ((pThis)->__nvoc_pbase_BinaryApiPrivileged)

#ifdef __nvoc_binary_api_h_disabled
#define __dynamicCast_BinaryApiPrivileged(pThis) ((BinaryApiPrivileged*)NULL)
#else //__nvoc_binary_api_h_disabled
#define __dynamicCast_BinaryApiPrivileged(pThis) \
    ((BinaryApiPrivileged*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(BinaryApiPrivileged)))
#endif //__nvoc_binary_api_h_disabled


NV_STATUS __nvoc_objCreateDynamic_BinaryApiPrivileged(BinaryApiPrivileged**, Dynamic*, NvU32, va_list);

NV_STATUS __nvoc_objCreate_BinaryApiPrivileged(BinaryApiPrivileged**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams);
#define __objCreate_BinaryApiPrivileged(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \
    __nvoc_objCreate_BinaryApiPrivileged((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams)

#define binapiprivControl(pResource, pCallContext, pParams) binapiprivControl_DISPATCH(pResource, pCallContext, pParams)
#define binapiprivShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) binapiprivShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy)
#define binapiprivUnmap(pGpuResource, pCallContext, pCpuMapping) binapiprivUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping)
#define binapiprivGetMemInterMapParams(pRmResource, pParams) binapiprivGetMemInterMapParams_DISPATCH(pRmResource, pParams)
#define binapiprivGetMemoryMappingDescriptor(pRmResource, ppMemDesc) binapiprivGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc)
#define binapiprivGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) binapiprivGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace)
#define binapiprivGetInternalObjectHandle(pGpuResource) binapiprivGetInternalObjectHandle_DISPATCH(pGpuResource)
#define binapiprivControlFilter(pResource, pCallContext, pParams) binapiprivControlFilter_DISPATCH(pResource, pCallContext, pParams)
#define binapiprivAddAdditionalDependants(pClient, pResource, pReference) binapiprivAddAdditionalDependants_DISPATCH(pClient, pResource, pReference)
#define binapiprivGetRefCount(pResource) binapiprivGetRefCount_DISPATCH(pResource)
#define binapiprivCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) binapiprivCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided)
#define binapiprivMapTo(pResource, pParams) binapiprivMapTo_DISPATCH(pResource, pParams)
#define binapiprivControl_Prologue(pResource, pCallContext, pParams) binapiprivControl_Prologue_DISPATCH(pResource, pCallContext, pParams)
#define binapiprivGetRegBaseOffsetAndSize(pGpuResource, pGpu, pOffset, pSize) binapiprivGetRegBaseOffsetAndSize_DISPATCH(pGpuResource, pGpu, pOffset, pSize)
#define binapiprivCanCopy(pResource) binapiprivCanCopy_DISPATCH(pResource)
#define binapiprivInternalControlForward(pGpuResource, command, pParams, size) binapiprivInternalControlForward_DISPATCH(pGpuResource, command, pParams, size)
#define binapiprivPreDestruct(pResource) binapiprivPreDestruct_DISPATCH(pResource)
#define binapiprivUnmapFrom(pResource, pParams) binapiprivUnmapFrom_DISPATCH(pResource, pParams)
#define binapiprivControl_Epilogue(pResource, pCallContext, pParams) binapiprivControl_Epilogue_DISPATCH(pResource, pCallContext, pParams)
#define binapiprivControlLookup(pResource, pParams, ppEntry) binapiprivControlLookup_DISPATCH(pResource, pParams, ppEntry)
#define binapiprivMap(pGpuResource, pCallContext, pParams, pCpuMapping) binapiprivMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping)
#define binapiprivAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) binapiprivAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight)
NV_STATUS binapiprivControl_IMPL(struct BinaryApiPrivileged *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams);
|
||||
|
||||
static inline NV_STATUS binapiprivControl_DISPATCH(struct BinaryApiPrivileged *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
|
||||
return pResource->__binapiprivControl__(pResource, pCallContext, pParams);
|
||||
}
|
||||
|
||||
static inline NvBool binapiprivShareCallback_DISPATCH(struct BinaryApiPrivileged *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
|
||||
return pGpuResource->__binapiprivShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy);
|
||||
}
|
||||
|
||||
static inline NV_STATUS binapiprivUnmap_DISPATCH(struct BinaryApiPrivileged *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) {
|
||||
return pGpuResource->__binapiprivUnmap__(pGpuResource, pCallContext, pCpuMapping);
|
||||
}
|
||||
|
||||
static inline NV_STATUS binapiprivGetMemInterMapParams_DISPATCH(struct BinaryApiPrivileged *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
|
||||
return pRmResource->__binapiprivGetMemInterMapParams__(pRmResource, pParams);
|
||||
}
|
||||
|
||||
static inline NV_STATUS binapiprivGetMemoryMappingDescriptor_DISPATCH(struct BinaryApiPrivileged *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
|
||||
return pRmResource->__binapiprivGetMemoryMappingDescriptor__(pRmResource, ppMemDesc);
|
||||
}
|
||||
|
||||
static inline NV_STATUS binapiprivGetMapAddrSpace_DISPATCH(struct BinaryApiPrivileged *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) {
|
||||
return pGpuResource->__binapiprivGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace);
|
||||
}
|
||||
|
||||
static inline NvHandle binapiprivGetInternalObjectHandle_DISPATCH(struct BinaryApiPrivileged *pGpuResource) {
|
||||
return pGpuResource->__binapiprivGetInternalObjectHandle__(pGpuResource);
|
||||
}
|
||||
|
||||
static inline NV_STATUS binapiprivControlFilter_DISPATCH(struct BinaryApiPrivileged *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
|
||||
return pResource->__binapiprivControlFilter__(pResource, pCallContext, pParams);
|
||||
}
|
||||
|
||||
static inline void binapiprivAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct BinaryApiPrivileged *pResource, RsResourceRef *pReference) {
|
||||
pResource->__binapiprivAddAdditionalDependants__(pClient, pResource, pReference);
|
||||
}
|
||||
|
||||
static inline NvU32 binapiprivGetRefCount_DISPATCH(struct BinaryApiPrivileged *pResource) {
|
||||
return pResource->__binapiprivGetRefCount__(pResource);
|
||||
}
|
||||
|
||||
static inline NV_STATUS binapiprivCheckMemInterUnmap_DISPATCH(struct BinaryApiPrivileged *pRmResource, NvBool bSubdeviceHandleProvided) {
|
||||
return pRmResource->__binapiprivCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided);
|
||||
}
|
||||
|
||||
static inline NV_STATUS binapiprivMapTo_DISPATCH(struct BinaryApiPrivileged *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
|
||||
return pResource->__binapiprivMapTo__(pResource, pParams);
|
||||
}
|
||||
|
||||
static inline NV_STATUS binapiprivControl_Prologue_DISPATCH(struct BinaryApiPrivileged *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
|
||||
return pResource->__binapiprivControl_Prologue__(pResource, pCallContext, pParams);
|
||||
}
|
||||
|
||||
static inline NV_STATUS binapiprivGetRegBaseOffsetAndSize_DISPATCH(struct BinaryApiPrivileged *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) {
|
||||
return pGpuResource->__binapiprivGetRegBaseOffsetAndSize__(pGpuResource, pGpu, pOffset, pSize);
|
||||
}
|
||||
|
||||
static inline NvBool binapiprivCanCopy_DISPATCH(struct BinaryApiPrivileged *pResource) {
|
||||
return pResource->__binapiprivCanCopy__(pResource);
|
||||
}
|
||||
|
||||
static inline NV_STATUS binapiprivInternalControlForward_DISPATCH(struct BinaryApiPrivileged *pGpuResource, NvU32 command, void *pParams, NvU32 size) {
|
||||
return pGpuResource->__binapiprivInternalControlForward__(pGpuResource, command, pParams, size);
|
||||
}
|
||||
|
||||
static inline void binapiprivPreDestruct_DISPATCH(struct BinaryApiPrivileged *pResource) {
|
||||
pResource->__binapiprivPreDestruct__(pResource);
|
||||
}
|
||||
|
||||
static inline NV_STATUS binapiprivUnmapFrom_DISPATCH(struct BinaryApiPrivileged *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
|
||||
return pResource->__binapiprivUnmapFrom__(pResource, pParams);
|
||||
}
|
||||
|
||||
static inline void binapiprivControl_Epilogue_DISPATCH(struct BinaryApiPrivileged *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
|
||||
pResource->__binapiprivControl_Epilogue__(pResource, pCallContext, pParams);
|
||||
}
|
||||
|
||||
static inline NV_STATUS binapiprivControlLookup_DISPATCH(struct BinaryApiPrivileged *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) {
|
||||
return pResource->__binapiprivControlLookup__(pResource, pParams, ppEntry);
|
||||
}
|
||||
|
||||
static inline NV_STATUS binapiprivMap_DISPATCH(struct BinaryApiPrivileged *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) {
|
||||
return pGpuResource->__binapiprivMap__(pGpuResource, pCallContext, pParams, pCpuMapping);
|
||||
}
|
||||
|
||||
static inline NvBool binapiprivAccessCallback_DISPATCH(struct BinaryApiPrivileged *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
|
||||
return pResource->__binapiprivAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight);
|
||||
}
|
||||
|
||||
NV_STATUS binapiprivConstruct_IMPL(struct BinaryApiPrivileged *arg_pResource, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams);
|
||||
#define __nvoc_binapiprivConstruct(arg_pResource, arg_pCallContext, arg_pParams) binapiprivConstruct_IMPL(arg_pResource, arg_pCallContext, arg_pParams)
|
||||
#undef PRIVATE_FIELD
|
||||
|
||||
|
||||
#endif
|
||||
|
||||
|
||||
#ifdef __cplusplus
|
||||
} // extern "C"
|
||||
#endif
|
||||
#endif // _G_BINARY_API_NVOC_H_
|
||||
@@ -1,385 +0,0 @@
#define NVOC_CLIENT_H_PRIVATE_ACCESS_ALLOWED
#include "nvoc/runtime.h"
#include "nvoc/rtti.h"
#include "nvtypes.h"
#include "nvport/nvport.h"
#include "nvport/inline/util_valist.h"
#include "utils/nvassert.h"
#include "g_client_nvoc.h"

#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0x21d236 = 1;
#endif

extern const struct NVOC_CLASS_DEF __nvoc_class_def_UserInfo;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsShared;

void __nvoc_init_UserInfo(UserInfo*);
void __nvoc_init_funcTable_UserInfo(UserInfo*);
NV_STATUS __nvoc_ctor_UserInfo(UserInfo*);
void __nvoc_init_dataField_UserInfo(UserInfo*);
void __nvoc_dtor_UserInfo(UserInfo*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_UserInfo;

static const struct NVOC_RTTI __nvoc_rtti_UserInfo_UserInfo = {
    /*pClassDef=*/ &__nvoc_class_def_UserInfo,
    /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_UserInfo,
    /*offset=*/ 0,
};

static const struct NVOC_RTTI __nvoc_rtti_UserInfo_Object = {
    /*pClassDef=*/ &__nvoc_class_def_Object,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(UserInfo, __nvoc_base_RsShared.__nvoc_base_Object),
};

static const struct NVOC_RTTI __nvoc_rtti_UserInfo_RsShared = {
    /*pClassDef=*/ &__nvoc_class_def_RsShared,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(UserInfo, __nvoc_base_RsShared),
};

static const struct NVOC_CASTINFO __nvoc_castinfo_UserInfo = {
    /*numRelatives=*/ 3,
    /*relatives=*/ {
        &__nvoc_rtti_UserInfo_UserInfo,
        &__nvoc_rtti_UserInfo_RsShared,
        &__nvoc_rtti_UserInfo_Object,
    },
};

const struct NVOC_CLASS_DEF __nvoc_class_def_UserInfo =
{
    /*classInfo=*/ {
        /*size=*/ sizeof(UserInfo),
        /*classId=*/ classId(UserInfo),
        /*providerId=*/ &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
        /*name=*/ "UserInfo",
#endif
    },
    /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_UserInfo,
    /*pCastInfo=*/ &__nvoc_castinfo_UserInfo,
    /*pExportInfo=*/ &__nvoc_export_info_UserInfo
};

const struct NVOC_EXPORT_INFO __nvoc_export_info_UserInfo =
{
    /*numEntries=*/ 0,
    /*pExportEntries=*/ 0
};

void __nvoc_dtor_RsShared(RsShared*);
void __nvoc_dtor_UserInfo(UserInfo *pThis) {
    __nvoc_userinfoDestruct(pThis);
    __nvoc_dtor_RsShared(&pThis->__nvoc_base_RsShared);
    PORT_UNREFERENCED_VARIABLE(pThis);
}

void __nvoc_init_dataField_UserInfo(UserInfo *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);
}

NV_STATUS __nvoc_ctor_RsShared(RsShared* );
NV_STATUS __nvoc_ctor_UserInfo(UserInfo *pThis) {
    NV_STATUS status = NV_OK;
    status = __nvoc_ctor_RsShared(&pThis->__nvoc_base_RsShared);
    if (status != NV_OK) goto __nvoc_ctor_UserInfo_fail_RsShared;
    __nvoc_init_dataField_UserInfo(pThis);

    status = __nvoc_userinfoConstruct(pThis);
    if (status != NV_OK) goto __nvoc_ctor_UserInfo_fail__init;
    goto __nvoc_ctor_UserInfo_exit; // Success

__nvoc_ctor_UserInfo_fail__init:
    __nvoc_dtor_RsShared(&pThis->__nvoc_base_RsShared);
__nvoc_ctor_UserInfo_fail_RsShared:
__nvoc_ctor_UserInfo_exit:

    return status;
}

static void __nvoc_init_funcTable_UserInfo_1(UserInfo *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);
}

void __nvoc_init_funcTable_UserInfo(UserInfo *pThis) {
    __nvoc_init_funcTable_UserInfo_1(pThis);
}

void __nvoc_init_RsShared(RsShared*);
void __nvoc_init_UserInfo(UserInfo *pThis) {
    pThis->__nvoc_pbase_UserInfo = pThis;
    pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RsShared.__nvoc_base_Object;
    pThis->__nvoc_pbase_RsShared = &pThis->__nvoc_base_RsShared;
    __nvoc_init_RsShared(&pThis->__nvoc_base_RsShared);
    __nvoc_init_funcTable_UserInfo(pThis);
}

NV_STATUS __nvoc_objCreate_UserInfo(UserInfo **ppThis, Dynamic *pParent, NvU32 createFlags) {
    NV_STATUS status;
    Object *pParentObj;
    UserInfo *pThis;

    pThis = portMemAllocNonPaged(sizeof(UserInfo));
    if (pThis == NULL) return NV_ERR_NO_MEMORY;

    portMemSet(pThis, 0, sizeof(UserInfo));

    __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_UserInfo);

    if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
    {
        pParentObj = dynamicCast(pParent, Object);
        objAddChild(pParentObj, &pThis->__nvoc_base_RsShared.__nvoc_base_Object);
    }
    else
    {
        pThis->__nvoc_base_RsShared.__nvoc_base_Object.pParent = NULL;
    }

    __nvoc_init_UserInfo(pThis);
    status = __nvoc_ctor_UserInfo(pThis);
    if (status != NV_OK) goto __nvoc_objCreate_UserInfo_cleanup;

    *ppThis = pThis;
    return NV_OK;

__nvoc_objCreate_UserInfo_cleanup:
    // do not call destructors here since the constructor already called them
    portMemFree(pThis);
    return status;
}

NV_STATUS __nvoc_objCreateDynamic_UserInfo(UserInfo **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
    NV_STATUS status;

    status = __nvoc_objCreate_UserInfo(ppThis, pParent, createFlags);

    return status;
}

#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0xb23d83 = 1;
#endif

extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmClient;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsClient;

void __nvoc_init_RmClient(RmClient*);
void __nvoc_init_funcTable_RmClient(RmClient*);
NV_STATUS __nvoc_ctor_RmClient(RmClient*, struct PORT_MEM_ALLOCATOR * arg_pAllocator, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams);
void __nvoc_init_dataField_RmClient(RmClient*);
void __nvoc_dtor_RmClient(RmClient*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_RmClient;

static const struct NVOC_RTTI __nvoc_rtti_RmClient_RmClient = {
    /*pClassDef=*/ &__nvoc_class_def_RmClient,
    /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_RmClient,
    /*offset=*/ 0,
};

static const struct NVOC_RTTI __nvoc_rtti_RmClient_Object = {
    /*pClassDef=*/ &__nvoc_class_def_Object,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(RmClient, __nvoc_base_RsClient.__nvoc_base_Object),
};

static const struct NVOC_RTTI __nvoc_rtti_RmClient_RsClient = {
    /*pClassDef=*/ &__nvoc_class_def_RsClient,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(RmClient, __nvoc_base_RsClient),
};

static const struct NVOC_CASTINFO __nvoc_castinfo_RmClient = {
    /*numRelatives=*/ 3,
    /*relatives=*/ {
        &__nvoc_rtti_RmClient_RmClient,
        &__nvoc_rtti_RmClient_RsClient,
        &__nvoc_rtti_RmClient_Object,
    },
};

const struct NVOC_CLASS_DEF __nvoc_class_def_RmClient =
{
    /*classInfo=*/ {
        /*size=*/ sizeof(RmClient),
        /*classId=*/ classId(RmClient),
        /*providerId=*/ &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
        /*name=*/ "RmClient",
#endif
    },
    /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_RmClient,
    /*pCastInfo=*/ &__nvoc_castinfo_RmClient,
    /*pExportInfo=*/ &__nvoc_export_info_RmClient
};

static NV_STATUS __nvoc_thunk_RmClient_clientValidate(struct RsClient *pClient, const API_SECURITY_INFO *pSecInfo) {
    return rmclientValidate((struct RmClient *)(((unsigned char *)pClient) - __nvoc_rtti_RmClient_RsClient.offset), pSecInfo);
}

static NV_STATUS __nvoc_thunk_RmClient_clientFreeResource(struct RsClient *pClient, struct RsServer *pServer, struct RS_RES_FREE_PARAMS_INTERNAL *pParams) {
    return rmclientFreeResource((struct RmClient *)(((unsigned char *)pClient) - __nvoc_rtti_RmClient_RsClient.offset), pServer, pParams);
}

static NV_STATUS __nvoc_thunk_RmClient_clientInterMap(struct RsClient *pClient, struct RsResourceRef *pMapperRef, struct RsResourceRef *pMappableRef, struct RS_INTER_MAP_PARAMS *pParams) {
    return rmclientInterMap((struct RmClient *)(((unsigned char *)pClient) - __nvoc_rtti_RmClient_RsClient.offset), pMapperRef, pMappableRef, pParams);
}

static void __nvoc_thunk_RmClient_clientInterUnmap(struct RsClient *pClient, struct RsResourceRef *pMapperRef, struct RS_INTER_UNMAP_PARAMS *pParams) {
    rmclientInterUnmap((struct RmClient *)(((unsigned char *)pClient) - __nvoc_rtti_RmClient_RsClient.offset), pMapperRef, pParams);
}

static NV_STATUS __nvoc_thunk_RmClient_clientPostProcessPendingFreeList(struct RsClient *pClient, struct RsResourceRef **ppFirstLowPriRef) {
    return rmclientPostProcessPendingFreeList((struct RmClient *)(((unsigned char *)pClient) - __nvoc_rtti_RmClient_RsClient.offset), ppFirstLowPriRef);
}

static NV_STATUS __nvoc_thunk_RsClient_rmclientDestructResourceRef(struct RmClient *pClient, RsServer *pServer, struct RsResourceRef *pResourceRef) {
    return clientDestructResourceRef((struct RsClient *)(((unsigned char *)pClient) + __nvoc_rtti_RmClient_RsClient.offset), pServer, pResourceRef);
}

static NV_STATUS __nvoc_thunk_RsClient_rmclientValidateNewResourceHandle(struct RmClient *pClient, NvHandle hResource, NvBool bRestrict) {
    return clientValidateNewResourceHandle((struct RsClient *)(((unsigned char *)pClient) + __nvoc_rtti_RmClient_RsClient.offset), hResource, bRestrict);
}

static NV_STATUS __nvoc_thunk_RsClient_rmclientShareResource(struct RmClient *pClient, struct RsResourceRef *pResourceRef, RS_SHARE_POLICY *pSharePolicy, struct CALL_CONTEXT *pCallContext) {
    return clientShareResource((struct RsClient *)(((unsigned char *)pClient) + __nvoc_rtti_RmClient_RsClient.offset), pResourceRef, pSharePolicy, pCallContext);
}

static NV_STATUS __nvoc_thunk_RsClient_rmclientUnmapMemory(struct RmClient *pClient, struct RsResourceRef *pResourceRef, struct RS_LOCK_INFO *pLockInfo, struct RsCpuMapping **ppCpuMapping, API_SECURITY_INFO *pSecInfo) {
    return clientUnmapMemory((struct RsClient *)(((unsigned char *)pClient) + __nvoc_rtti_RmClient_RsClient.offset), pResourceRef, pLockInfo, ppCpuMapping, pSecInfo);
}

const struct NVOC_EXPORT_INFO __nvoc_export_info_RmClient =
{
    /*numEntries=*/ 0,
    /*pExportEntries=*/ 0
};

void __nvoc_dtor_RsClient(RsClient*);
void __nvoc_dtor_RmClient(RmClient *pThis) {
    __nvoc_rmclientDestruct(pThis);
    __nvoc_dtor_RsClient(&pThis->__nvoc_base_RsClient);
    PORT_UNREFERENCED_VARIABLE(pThis);
}

void __nvoc_init_dataField_RmClient(RmClient *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);
}

NV_STATUS __nvoc_ctor_RsClient(RsClient* , struct PORT_MEM_ALLOCATOR *, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
NV_STATUS __nvoc_ctor_RmClient(RmClient *pThis, struct PORT_MEM_ALLOCATOR * arg_pAllocator, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
    NV_STATUS status = NV_OK;
    status = __nvoc_ctor_RsClient(&pThis->__nvoc_base_RsClient, arg_pAllocator, arg_pParams);
    if (status != NV_OK) goto __nvoc_ctor_RmClient_fail_RsClient;
    __nvoc_init_dataField_RmClient(pThis);

    status = __nvoc_rmclientConstruct(pThis, arg_pAllocator, arg_pParams);
    if (status != NV_OK) goto __nvoc_ctor_RmClient_fail__init;
    goto __nvoc_ctor_RmClient_exit; // Success

__nvoc_ctor_RmClient_fail__init:
    __nvoc_dtor_RsClient(&pThis->__nvoc_base_RsClient);
__nvoc_ctor_RmClient_fail_RsClient:
__nvoc_ctor_RmClient_exit:

    return status;
}

static void __nvoc_init_funcTable_RmClient_1(RmClient *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);

    pThis->__rmclientValidate__ = &rmclientValidate_IMPL;

    pThis->__rmclientFreeResource__ = &rmclientFreeResource_IMPL;

    pThis->__rmclientInterMap__ = &rmclientInterMap_IMPL;

    pThis->__rmclientInterUnmap__ = &rmclientInterUnmap_IMPL;

    pThis->__rmclientPostProcessPendingFreeList__ = &rmclientPostProcessPendingFreeList_IMPL;

    pThis->__nvoc_base_RsClient.__clientValidate__ = &__nvoc_thunk_RmClient_clientValidate;

    pThis->__nvoc_base_RsClient.__clientFreeResource__ = &__nvoc_thunk_RmClient_clientFreeResource;

    pThis->__nvoc_base_RsClient.__clientInterMap__ = &__nvoc_thunk_RmClient_clientInterMap;

    pThis->__nvoc_base_RsClient.__clientInterUnmap__ = &__nvoc_thunk_RmClient_clientInterUnmap;

    pThis->__nvoc_base_RsClient.__clientPostProcessPendingFreeList__ = &__nvoc_thunk_RmClient_clientPostProcessPendingFreeList;

    pThis->__rmclientDestructResourceRef__ = &__nvoc_thunk_RsClient_rmclientDestructResourceRef;

    pThis->__rmclientValidateNewResourceHandle__ = &__nvoc_thunk_RsClient_rmclientValidateNewResourceHandle;

    pThis->__rmclientShareResource__ = &__nvoc_thunk_RsClient_rmclientShareResource;

    pThis->__rmclientUnmapMemory__ = &__nvoc_thunk_RsClient_rmclientUnmapMemory;
}

void __nvoc_init_funcTable_RmClient(RmClient *pThis) {
    __nvoc_init_funcTable_RmClient_1(pThis);
}

void __nvoc_init_RsClient(RsClient*);
void __nvoc_init_RmClient(RmClient *pThis) {
    pThis->__nvoc_pbase_RmClient = pThis;
    pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RsClient.__nvoc_base_Object;
    pThis->__nvoc_pbase_RsClient = &pThis->__nvoc_base_RsClient;
    __nvoc_init_RsClient(&pThis->__nvoc_base_RsClient);
    __nvoc_init_funcTable_RmClient(pThis);
}

NV_STATUS __nvoc_objCreate_RmClient(RmClient **ppThis, Dynamic *pParent, NvU32 createFlags, struct PORT_MEM_ALLOCATOR * arg_pAllocator, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
    NV_STATUS status;
    Object *pParentObj;
    RmClient *pThis;

    pThis = portMemAllocNonPaged(sizeof(RmClient));
    if (pThis == NULL) return NV_ERR_NO_MEMORY;

    portMemSet(pThis, 0, sizeof(RmClient));

    __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_RmClient);

    if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
    {
        pParentObj = dynamicCast(pParent, Object);
        objAddChild(pParentObj, &pThis->__nvoc_base_RsClient.__nvoc_base_Object);
    }
    else
    {
        pThis->__nvoc_base_RsClient.__nvoc_base_Object.pParent = NULL;
    }

    __nvoc_init_RmClient(pThis);
    status = __nvoc_ctor_RmClient(pThis, arg_pAllocator, arg_pParams);
    if (status != NV_OK) goto __nvoc_objCreate_RmClient_cleanup;

    *ppThis = pThis;
    return NV_OK;

__nvoc_objCreate_RmClient_cleanup:
    // do not call destructors here since the constructor already called them
    portMemFree(pThis);
    return status;
}

NV_STATUS __nvoc_objCreateDynamic_RmClient(RmClient **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
    NV_STATUS status;
    struct PORT_MEM_ALLOCATOR * arg_pAllocator = va_arg(args, struct PORT_MEM_ALLOCATOR *);
    struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *);

    status = __nvoc_objCreate_RmClient(ppThis, pParent, createFlags, arg_pAllocator, arg_pParams);

    return status;
}
File diff suppressed because it is too large
@@ -1,427 +0,0 @@
#define NVOC_CONTEXT_DMA_H_PRIVATE_ACCESS_ALLOWED
#include "nvoc/runtime.h"
#include "nvoc/rtti.h"
#include "nvtypes.h"
#include "nvport/nvport.h"
#include "nvport/inline/util_valist.h"
#include "utils/nvassert.h"
#include "g_context_dma_nvoc.h"

#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0x88441b = 1;
#endif

extern const struct NVOC_CLASS_DEF __nvoc_class_def_ContextDma;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier;

void __nvoc_init_ContextDma(ContextDma*);
void __nvoc_init_funcTable_ContextDma(ContextDma*);
NV_STATUS __nvoc_ctor_ContextDma(ContextDma*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams);
void __nvoc_init_dataField_ContextDma(ContextDma*);
void __nvoc_dtor_ContextDma(ContextDma*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_ContextDma;

static const struct NVOC_RTTI __nvoc_rtti_ContextDma_ContextDma = {
    /*pClassDef=*/ &__nvoc_class_def_ContextDma,
    /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_ContextDma,
    /*offset=*/ 0,
};

static const struct NVOC_RTTI __nvoc_rtti_ContextDma_Object = {
    /*pClassDef=*/ &__nvoc_class_def_Object,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(ContextDma, __nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object),
};

static const struct NVOC_RTTI __nvoc_rtti_ContextDma_RsResource = {
    /*pClassDef=*/ &__nvoc_class_def_RsResource,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(ContextDma, __nvoc_base_RmResource.__nvoc_base_RsResource),
};

static const struct NVOC_RTTI __nvoc_rtti_ContextDma_RmResourceCommon = {
    /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(ContextDma, __nvoc_base_RmResource.__nvoc_base_RmResourceCommon),
};

static const struct NVOC_RTTI __nvoc_rtti_ContextDma_RmResource = {
    /*pClassDef=*/ &__nvoc_class_def_RmResource,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(ContextDma, __nvoc_base_RmResource),
};

static const struct NVOC_RTTI __nvoc_rtti_ContextDma_INotifier = {
    /*pClassDef=*/ &__nvoc_class_def_INotifier,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(ContextDma, __nvoc_base_Notifier.__nvoc_base_INotifier),
};

static const struct NVOC_RTTI __nvoc_rtti_ContextDma_Notifier = {
    /*pClassDef=*/ &__nvoc_class_def_Notifier,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(ContextDma, __nvoc_base_Notifier),
};

static const struct NVOC_CASTINFO __nvoc_castinfo_ContextDma = {
    /*numRelatives=*/ 7,
    /*relatives=*/ {
        &__nvoc_rtti_ContextDma_ContextDma,
        &__nvoc_rtti_ContextDma_Notifier,
        &__nvoc_rtti_ContextDma_INotifier,
        &__nvoc_rtti_ContextDma_RmResource,
        &__nvoc_rtti_ContextDma_RmResourceCommon,
        &__nvoc_rtti_ContextDma_RsResource,
        &__nvoc_rtti_ContextDma_Object,
    },
};

const struct NVOC_CLASS_DEF __nvoc_class_def_ContextDma =
{
    /*classInfo=*/ {
        /*size=*/ sizeof(ContextDma),
        /*classId=*/ classId(ContextDma),
        /*providerId=*/ &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
        /*name=*/ "ContextDma",
#endif
    },
    /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_ContextDma,
    /*pCastInfo=*/ &__nvoc_castinfo_ContextDma,
    /*pExportInfo=*/ &__nvoc_export_info_ContextDma
};

static NV_STATUS __nvoc_thunk_ContextDma_resMapTo(struct RsResource *pContextDma, struct RS_RES_MAP_TO_PARAMS *pParams) {
    return ctxdmaMapTo((struct ContextDma *)(((unsigned char *)pContextDma) - __nvoc_rtti_ContextDma_RsResource.offset), pParams);
}

static NV_STATUS __nvoc_thunk_ContextDma_resUnmapFrom(struct RsResource *pContextDma, struct RS_RES_UNMAP_FROM_PARAMS *pParams) {
    return ctxdmaUnmapFrom((struct ContextDma *)(((unsigned char *)pContextDma) - __nvoc_rtti_ContextDma_RsResource.offset), pParams);
}

static NvBool __nvoc_thunk_RmResource_ctxdmaShareCallback(struct ContextDma *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
    return rmresShareCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_ContextDma_RmResource.offset), pInvokingClient, pParentRef, pSharePolicy);
}

static NV_STATUS __nvoc_thunk_RmResource_ctxdmaCheckMemInterUnmap(struct ContextDma *pRmResource, NvBool bSubdeviceHandleProvided) {
    return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_ContextDma_RmResource.offset), bSubdeviceHandleProvided);
}

static NvBool __nvoc_thunk_RmResource_ctxdmaAccessCallback(struct ContextDma *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
    return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_ContextDma_RmResource.offset), pInvokingClient, pAllocParams, accessRight);
}

static NV_STATUS __nvoc_thunk_RmResource_ctxdmaGetMemInterMapParams(struct ContextDma *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
    return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_ContextDma_RmResource.offset), pParams);
}

static NV_STATUS __nvoc_thunk_RmResource_ctxdmaGetMemoryMappingDescriptor(struct ContextDma *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
    return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_ContextDma_RmResource.offset), ppMemDesc);
}

static void __nvoc_thunk_Notifier_ctxdmaSetNotificationShare(struct ContextDma *pNotifier, struct NotifShare *pNotifShare) {
    notifySetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_ContextDma_Notifier.offset), pNotifShare);
}

static NV_STATUS __nvoc_thunk_RsResource_ctxdmaControl(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return resControl((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ContextDma_RsResource.offset), pCallContext, pParams);
}

static NV_STATUS __nvoc_thunk_RsResource_ctxdmaControlFilter(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ContextDma_RsResource.offset), pCallContext, pParams);
}

static NvU32 __nvoc_thunk_RsResource_ctxdmaGetRefCount(struct ContextDma *pResource) {
    return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ContextDma_RsResource.offset));
}

static NV_STATUS __nvoc_thunk_Notifier_ctxdmaUnregisterEvent(struct ContextDma *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) {
    return notifyUnregisterEvent((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_ContextDma_Notifier.offset), hNotifierClient, hNotifierResource, hEventClient, hEvent);
}

static NV_STATUS __nvoc_thunk_RsResource_ctxdmaUnmap(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) {
    return resUnmap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ContextDma_RsResource.offset), pCallContext, pCpuMapping);
}

static NvBool __nvoc_thunk_RsResource_ctxdmaCanCopy(struct ContextDma *pResource) {
    return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ContextDma_RsResource.offset));
}

static NV_STATUS __nvoc_thunk_RmResource_ctxdmaControl_Prologue(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_ContextDma_RmResource.offset), pCallContext, pParams);
}

static void __nvoc_thunk_RsResource_ctxdmaAddAdditionalDependants(struct RsClient *pClient, struct ContextDma *pResource, RsResourceRef *pReference) {
    resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ContextDma_RsResource.offset), pReference);
}

static void __nvoc_thunk_RsResource_ctxdmaPreDestruct(struct ContextDma *pResource) {
    resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ContextDma_RsResource.offset));
}

static PEVENTNOTIFICATION *__nvoc_thunk_Notifier_ctxdmaGetNotificationListPtr(struct ContextDma *pNotifier) {
    return notifyGetNotificationListPtr((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_ContextDma_Notifier.offset));
}

static void __nvoc_thunk_RmResource_ctxdmaControl_Epilogue(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_ContextDma_RmResource.offset), pCallContext, pParams);
}

static struct NotifShare *__nvoc_thunk_Notifier_ctxdmaGetNotificationShare(struct ContextDma *pNotifier) {
    return notifyGetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_ContextDma_Notifier.offset));
}

static NV_STATUS __nvoc_thunk_RsResource_ctxdmaControlLookup(struct ContextDma *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) {
    return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ContextDma_RsResource.offset), pParams, ppEntry);
}

static NV_STATUS __nvoc_thunk_RsResource_ctxdmaMap(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) {
    return resMap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_ContextDma_RsResource.offset), pCallContext, pParams, pCpuMapping);
}

static NV_STATUS __nvoc_thunk_Notifier_ctxdmaGetOrAllocNotifShare(struct ContextDma *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) {
    return notifyGetOrAllocNotifShare((struct Notifier *)(((unsigned char *)pNotifier) + __nvoc_rtti_ContextDma_Notifier.offset), hNotifierClient, hNotifierResource, ppNotifShare);
}

#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG)
#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0)
#endif

static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_ContextDma[] =
{
    { /* [0] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u)
        /*pFunc=*/ (void (*)(void)) NULL,
#else
        /*pFunc=*/ (void (*)(void)) ctxdmaCtrlCmdUpdateContextdma_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u)
        /*flags=*/ 0x0u,
        /*accessRight=*/0x0u,
        /*methodId=*/ 0x20101u,
        /*paramSize=*/ sizeof(NV0002_CTRL_UPDATE_CONTEXTDMA_PARAMS),
        /*pClassInfo=*/ &(__nvoc_class_def_ContextDma.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
        /*func=*/ "ctxdmaCtrlCmdUpdateContextdma"
#endif
    },
    { /* [1] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
        /*pFunc=*/ (void (*)(void)) NULL,
#else
        /*pFunc=*/ (void (*)(void)) ctxdmaCtrlCmdBindContextdma_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
        /*flags=*/ 0x10u,
        /*accessRight=*/0x0u,
        /*methodId=*/ 0x20102u,
        /*paramSize=*/ sizeof(NV0002_CTRL_BIND_CONTEXTDMA_PARAMS),
        /*pClassInfo=*/ &(__nvoc_class_def_ContextDma.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
        /*func=*/ "ctxdmaCtrlCmdBindContextdma"
#endif
    },
    { /* [2] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
        /*pFunc=*/ (void (*)(void)) NULL,
#else
        /*pFunc=*/ (void (*)(void)) ctxdmaCtrlCmdUnbindContextdma_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
        /*flags=*/ 0x10u,
        /*accessRight=*/0x0u,
        /*methodId=*/ 0x20103u,
        /*paramSize=*/ sizeof(NV0002_CTRL_UNBIND_CONTEXTDMA_PARAMS),
        /*pClassInfo=*/ &(__nvoc_class_def_ContextDma.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
        /*func=*/ "ctxdmaCtrlCmdUnbindContextdma"
#endif
    },

};

const struct NVOC_EXPORT_INFO __nvoc_export_info_ContextDma =
{
    /*numEntries=*/ 3,
    /*pExportEntries=*/ __nvoc_exported_method_def_ContextDma
};

void __nvoc_dtor_RmResource(RmResource*);
void __nvoc_dtor_Notifier(Notifier*);
void __nvoc_dtor_ContextDma(ContextDma *pThis) {
    __nvoc_ctxdmaDestruct(pThis);
    __nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource);
    __nvoc_dtor_Notifier(&pThis->__nvoc_base_Notifier);
    PORT_UNREFERENCED_VARIABLE(pThis);
}

void __nvoc_init_dataField_ContextDma(ContextDma *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);
}

NV_STATUS __nvoc_ctor_RmResource(RmResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
NV_STATUS __nvoc_ctor_Notifier(Notifier* , struct CALL_CONTEXT *);
NV_STATUS __nvoc_ctor_ContextDma(ContextDma *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
    NV_STATUS status = NV_OK;
    status = __nvoc_ctor_RmResource(&pThis->__nvoc_base_RmResource, arg_pCallContext, arg_pParams);
    if (status != NV_OK) goto __nvoc_ctor_ContextDma_fail_RmResource;
    status = __nvoc_ctor_Notifier(&pThis->__nvoc_base_Notifier, arg_pCallContext);
    if (status != NV_OK) goto __nvoc_ctor_ContextDma_fail_Notifier;
    __nvoc_init_dataField_ContextDma(pThis);

    status = __nvoc_ctxdmaConstruct(pThis, arg_pCallContext, arg_pParams);
    if (status != NV_OK) goto __nvoc_ctor_ContextDma_fail__init;
    goto __nvoc_ctor_ContextDma_exit; // Success

__nvoc_ctor_ContextDma_fail__init:
    __nvoc_dtor_Notifier(&pThis->__nvoc_base_Notifier);
__nvoc_ctor_ContextDma_fail_Notifier:
    __nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource);
__nvoc_ctor_ContextDma_fail_RmResource:
__nvoc_ctor_ContextDma_exit:

    return status;
}

static void __nvoc_init_funcTable_ContextDma_1(ContextDma *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);

    pThis->__ctxdmaValidate__ = &ctxdmaValidate_IMPL;

    pThis->__ctxdmaGetKernelVA__ = &ctxdmaGetKernelVA_IMPL;

    pThis->__ctxdmaMapTo__ = &ctxdmaMapTo_IMPL;

    pThis->__ctxdmaUnmapFrom__ = &ctxdmaUnmapFrom_IMPL;

#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x0u)
    pThis->__ctxdmaCtrlCmdUpdateContextdma__ = &ctxdmaCtrlCmdUpdateContextdma_IMPL;
#endif

#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
    pThis->__ctxdmaCtrlCmdBindContextdma__ = &ctxdmaCtrlCmdBindContextdma_IMPL;
#endif

#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
    pThis->__ctxdmaCtrlCmdUnbindContextdma__ = &ctxdmaCtrlCmdUnbindContextdma_IMPL;
#endif

    pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__resMapTo__ = &__nvoc_thunk_ContextDma_resMapTo;

    pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__resUnmapFrom__ = &__nvoc_thunk_ContextDma_resUnmapFrom;

    pThis->__ctxdmaShareCallback__ = &__nvoc_thunk_RmResource_ctxdmaShareCallback;

    pThis->__ctxdmaCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_ctxdmaCheckMemInterUnmap;

    pThis->__ctxdmaAccessCallback__ = &__nvoc_thunk_RmResource_ctxdmaAccessCallback;

    pThis->__ctxdmaGetMemInterMapParams__ = &__nvoc_thunk_RmResource_ctxdmaGetMemInterMapParams;

    pThis->__ctxdmaGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_ctxdmaGetMemoryMappingDescriptor;

    pThis->__ctxdmaSetNotificationShare__ = &__nvoc_thunk_Notifier_ctxdmaSetNotificationShare;

    pThis->__ctxdmaControl__ = &__nvoc_thunk_RsResource_ctxdmaControl;

    pThis->__ctxdmaControlFilter__ = &__nvoc_thunk_RsResource_ctxdmaControlFilter;

    pThis->__ctxdmaGetRefCount__ = &__nvoc_thunk_RsResource_ctxdmaGetRefCount;

    pThis->__ctxdmaUnregisterEvent__ = &__nvoc_thunk_Notifier_ctxdmaUnregisterEvent;

    pThis->__ctxdmaUnmap__ = &__nvoc_thunk_RsResource_ctxdmaUnmap;

    pThis->__ctxdmaCanCopy__ = &__nvoc_thunk_RsResource_ctxdmaCanCopy;

    pThis->__ctxdmaControl_Prologue__ = &__nvoc_thunk_RmResource_ctxdmaControl_Prologue;

    pThis->__ctxdmaAddAdditionalDependants__ = &__nvoc_thunk_RsResource_ctxdmaAddAdditionalDependants;

    pThis->__ctxdmaPreDestruct__ = &__nvoc_thunk_RsResource_ctxdmaPreDestruct;

    pThis->__ctxdmaGetNotificationListPtr__ = &__nvoc_thunk_Notifier_ctxdmaGetNotificationListPtr;

    pThis->__ctxdmaControl_Epilogue__ = &__nvoc_thunk_RmResource_ctxdmaControl_Epilogue;

    pThis->__ctxdmaGetNotificationShare__ = &__nvoc_thunk_Notifier_ctxdmaGetNotificationShare;

    pThis->__ctxdmaControlLookup__ = &__nvoc_thunk_RsResource_ctxdmaControlLookup;

    pThis->__ctxdmaMap__ = &__nvoc_thunk_RsResource_ctxdmaMap;

    pThis->__ctxdmaGetOrAllocNotifShare__ = &__nvoc_thunk_Notifier_ctxdmaGetOrAllocNotifShare;
}

void __nvoc_init_funcTable_ContextDma(ContextDma *pThis) {
    __nvoc_init_funcTable_ContextDma_1(pThis);
}

void __nvoc_init_RmResource(RmResource*);
void __nvoc_init_Notifier(Notifier*);
void __nvoc_init_ContextDma(ContextDma *pThis) {
    pThis->__nvoc_pbase_ContextDma = pThis;
    pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object;
    pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource;
    pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_RmResource.__nvoc_base_RmResourceCommon;
    pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_RmResource;
    pThis->__nvoc_pbase_INotifier = &pThis->__nvoc_base_Notifier.__nvoc_base_INotifier;
    pThis->__nvoc_pbase_Notifier = &pThis->__nvoc_base_Notifier;
    __nvoc_init_RmResource(&pThis->__nvoc_base_RmResource);
    __nvoc_init_Notifier(&pThis->__nvoc_base_Notifier);
    __nvoc_init_funcTable_ContextDma(pThis);
}

NV_STATUS __nvoc_objCreate_ContextDma(ContextDma **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
    NV_STATUS status;
    Object *pParentObj;
    ContextDma *pThis;

    pThis = portMemAllocNonPaged(sizeof(ContextDma));
    if (pThis == NULL) return NV_ERR_NO_MEMORY;

    portMemSet(pThis, 0, sizeof(ContextDma));

    __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_ContextDma);

    if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
    {
        pParentObj = dynamicCast(pParent, Object);
        objAddChild(pParentObj, &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object);
    }
    else
    {
        pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL;
    }

    __nvoc_init_ContextDma(pThis);
    status = __nvoc_ctor_ContextDma(pThis, arg_pCallContext, arg_pParams);
    if (status != NV_OK) goto __nvoc_objCreate_ContextDma_cleanup;

    *ppThis = pThis;
    return NV_OK;

__nvoc_objCreate_ContextDma_cleanup:
    // do not call destructors here since the constructor already called them
    portMemFree(pThis);
    return status;
}

NV_STATUS __nvoc_objCreateDynamic_ContextDma(ContextDma **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
    NV_STATUS status;
    struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *);
    struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *);

    status = __nvoc_objCreate_ContextDma(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams);

    return status;
}
@@ -1,356 +0,0 @@
|
||||
#ifndef _G_CONTEXT_DMA_NVOC_H_
|
||||
#define _G_CONTEXT_DMA_NVOC_H_
|
||||
#include "nvoc/runtime.h"
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/*
|
||||
* SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
||||
* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "g_context_dma_nvoc.h"
|
||||
|
||||
#ifndef CONTEXT_DMA_H
|
||||
#define CONTEXT_DMA_H
|
||||
|
||||
#include "core/core.h"
|
||||
#include "gpu/mem_mgr/mem_desc.h"
|
||||
#include "rmapi/resource.h"
|
||||
#include "rmapi/event.h"
|
||||
#include "ctrl/ctrl0002.h"
|
||||
#include "rmapi/control.h" // for macro RMCTRL_EXPORT etc.
|
||||
#include "nvlimits.h"
|
||||
|
||||
struct Device;
|
||||
|
||||
#ifndef __NVOC_CLASS_Device_TYPEDEF__
|
||||
#define __NVOC_CLASS_Device_TYPEDEF__
|
||||
typedef struct Device Device;
|
||||
#endif /* __NVOC_CLASS_Device_TYPEDEF__ */
|
||||
|
||||
#ifndef __nvoc_class_id_Device
|
||||
#define __nvoc_class_id_Device 0xe0ac20
|
||||
#endif /* __nvoc_class_id_Device */
|
||||
|
||||
|
||||
struct Memory;
|
||||
|
||||
#ifndef __NVOC_CLASS_Memory_TYPEDEF__
|
||||
#define __NVOC_CLASS_Memory_TYPEDEF__
|
||||
typedef struct Memory Memory;
|
||||
#endif /* __NVOC_CLASS_Memory_TYPEDEF__ */
|
||||
|
||||
#ifndef __nvoc_class_id_Memory
|
||||
#define __nvoc_class_id_Memory 0x4789f2
|
||||
#endif /* __nvoc_class_id_Memory */
|
||||
|
||||
|
||||
|
||||
/*!
|
||||
* RM internal class representing NV01_CONTEXT_DMA
|
||||
*/
|
||||
#ifdef NVOC_CONTEXT_DMA_H_PRIVATE_ACCESS_ALLOWED
|
||||
#define PRIVATE_FIELD(x) x
|
||||
#else
|
||||
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
|
||||
#endif
|
||||
struct ContextDma {
|
||||
const struct NVOC_RTTI *__nvoc_rtti;
|
||||
struct RmResource __nvoc_base_RmResource;
|
||||
struct Notifier __nvoc_base_Notifier;
|
||||
struct Object *__nvoc_pbase_Object;
|
||||
struct RsResource *__nvoc_pbase_RsResource;
|
||||
struct RmResourceCommon *__nvoc_pbase_RmResourceCommon;
|
||||
struct RmResource *__nvoc_pbase_RmResource;
|
||||
struct INotifier *__nvoc_pbase_INotifier;
|
||||
struct Notifier *__nvoc_pbase_Notifier;
|
||||
struct ContextDma *__nvoc_pbase_ContextDma;
|
||||
NV_STATUS (*__ctxdmaValidate__)(struct ContextDma *, NvU64, NvU64);
|
||||
NV_STATUS (*__ctxdmaGetKernelVA__)(struct ContextDma *, NvU64, NvU64, void **, NvU32);
|
||||
NV_STATUS (*__ctxdmaMapTo__)(struct ContextDma *, struct RS_RES_MAP_TO_PARAMS *);
|
||||
NV_STATUS (*__ctxdmaUnmapFrom__)(struct ContextDma *, struct RS_RES_UNMAP_FROM_PARAMS *);
|
||||
NV_STATUS (*__ctxdmaCtrlCmdUpdateContextdma__)(struct ContextDma *, NV0002_CTRL_UPDATE_CONTEXTDMA_PARAMS *);
|
||||
NV_STATUS (*__ctxdmaCtrlCmdBindContextdma__)(struct ContextDma *, NV0002_CTRL_BIND_CONTEXTDMA_PARAMS *);
|
||||
NV_STATUS (*__ctxdmaCtrlCmdUnbindContextdma__)(struct ContextDma *, NV0002_CTRL_UNBIND_CONTEXTDMA_PARAMS *);
|
||||
NvBool (*__ctxdmaShareCallback__)(struct ContextDma *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *);
|
||||
NV_STATUS (*__ctxdmaCheckMemInterUnmap__)(struct ContextDma *, NvBool);
|
||||
NvBool (*__ctxdmaAccessCallback__)(struct ContextDma *, struct RsClient *, void *, RsAccessRight);
|
||||
NV_STATUS (*__ctxdmaGetMemInterMapParams__)(struct ContextDma *, RMRES_MEM_INTER_MAP_PARAMS *);
|
||||
NV_STATUS (*__ctxdmaGetMemoryMappingDescriptor__)(struct ContextDma *, struct MEMORY_DESCRIPTOR **);
|
||||
void (*__ctxdmaSetNotificationShare__)(struct ContextDma *, struct NotifShare *);
|
||||
NV_STATUS (*__ctxdmaControl__)(struct ContextDma *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
|
||||
NV_STATUS (*__ctxdmaControlFilter__)(struct ContextDma *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
|
||||
NvU32 (*__ctxdmaGetRefCount__)(struct ContextDma *);
|
||||
NV_STATUS (*__ctxdmaUnregisterEvent__)(struct ContextDma *, NvHandle, NvHandle, NvHandle, NvHandle);
|
||||
NV_STATUS (*__ctxdmaUnmap__)(struct ContextDma *, struct CALL_CONTEXT *, RsCpuMapping *);
|
||||
NvBool (*__ctxdmaCanCopy__)(struct ContextDma *);
|
||||
NV_STATUS (*__ctxdmaControl_Prologue__)(struct ContextDma *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
|
||||
void (*__ctxdmaAddAdditionalDependants__)(struct RsClient *, struct ContextDma *, RsResourceRef *);
|
||||
void (*__ctxdmaPreDestruct__)(struct ContextDma *);
|
||||
PEVENTNOTIFICATION *(*__ctxdmaGetNotificationListPtr__)(struct ContextDma *);
|
||||
void (*__ctxdmaControl_Epilogue__)(struct ContextDma *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
|
||||
struct NotifShare *(*__ctxdmaGetNotificationShare__)(struct ContextDma *);
|
||||
NV_STATUS (*__ctxdmaControlLookup__)(struct ContextDma *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **);
|
||||
NV_STATUS (*__ctxdmaMap__)(struct ContextDma *, struct CALL_CONTEXT *, RS_CPU_MAP_PARAMS *, RsCpuMapping *);
|
||||
NV_STATUS (*__ctxdmaGetOrAllocNotifShare__)(struct ContextDma *, NvHandle, NvHandle, struct NotifShare **);
|
||||
NvU32 Class;
|
||||
NvU32 Flags;
|
||||
NvBool bReadOnly;
|
||||
NvU32 CacheSnoop;
|
||||
NvU32 Type;
|
||||
NvU64 Limit;
|
||||
NV_ADDRESS_SPACE AddressSpace;
|
||||
NvBool bUnicast;
|
||||
void *KernelVAddr[8];
|
||||
void *KernelPriv;
|
||||
NvU64 FbAperture[8];
|
||||
NvU64 FbApertureLen[8];
|
||||
struct Memory *pMemory;
|
||||
struct MEMORY_DESCRIPTOR *pMemDesc;
|
||||
NvU32 Instance[8];
|
||||
NvU32 InstRefCount[8];
|
||||
struct OBJGPU *pGpu;
|
||||
struct Device *pDevice;
|
||||
};
|
||||
|
||||
#ifndef __NVOC_CLASS_ContextDma_TYPEDEF__
|
||||
#define __NVOC_CLASS_ContextDma_TYPEDEF__
|
||||
typedef struct ContextDma ContextDma;
|
||||
#endif /* __NVOC_CLASS_ContextDma_TYPEDEF__ */
|
||||
|
||||
#ifndef __nvoc_class_id_ContextDma
|
||||
#define __nvoc_class_id_ContextDma 0x88441b
|
||||
#endif /* __nvoc_class_id_ContextDma */
|
||||
|
||||
extern const struct NVOC_CLASS_DEF __nvoc_class_def_ContextDma;
|
||||
|
||||
#define __staticCast_ContextDma(pThis) \
|
||||
((pThis)->__nvoc_pbase_ContextDma)
|
||||
|
||||
#ifdef __nvoc_context_dma_h_disabled
|
||||
#define __dynamicCast_ContextDma(pThis) ((ContextDma*)NULL)
|
||||
#else //__nvoc_context_dma_h_disabled
|
||||
#define __dynamicCast_ContextDma(pThis) \
|
||||
((ContextDma*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(ContextDma)))
|
||||
#endif //__nvoc_context_dma_h_disabled
|
||||
|
||||
|
||||
NV_STATUS __nvoc_objCreateDynamic_ContextDma(ContextDma**, Dynamic*, NvU32, va_list);
|
||||
|
||||
NV_STATUS __nvoc_objCreate_ContextDma(ContextDma**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams);
|
||||
#define __objCreate_ContextDma(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \
|
||||
__nvoc_objCreate_ContextDma((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams)
|
||||
|
||||
#define ctxdmaValidate(pContextDma, start, len) ctxdmaValidate_DISPATCH(pContextDma, start, len)
|
||||
#define ctxdmaGetKernelVA(pContextDma, start, len, arg0, VA_idx) ctxdmaGetKernelVA_DISPATCH(pContextDma, start, len, arg0, VA_idx)
|
||||
#define ctxdmaMapTo(pContextDma, pParams) ctxdmaMapTo_DISPATCH(pContextDma, pParams)
|
||||
#define ctxdmaUnmapFrom(pContextDma, pParams) ctxdmaUnmapFrom_DISPATCH(pContextDma, pParams)
|
||||
#define ctxdmaCtrlCmdUpdateContextdma(pContextDma, pUpdateCtxtDmaParams) ctxdmaCtrlCmdUpdateContextdma_DISPATCH(pContextDma, pUpdateCtxtDmaParams)
|
||||
#define ctxdmaCtrlCmdBindContextdma(pContextDma, pBindCtxtDmaParams) ctxdmaCtrlCmdBindContextdma_DISPATCH(pContextDma, pBindCtxtDmaParams)
|
||||
#define ctxdmaCtrlCmdUnbindContextdma(pContextDma, pUnbindCtxtDmaParams) ctxdmaCtrlCmdUnbindContextdma_DISPATCH(pContextDma, pUnbindCtxtDmaParams)
|
||||
#define ctxdmaShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) ctxdmaShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy)
|
||||
#define ctxdmaCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) ctxdmaCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided)
|
||||
#define ctxdmaAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) ctxdmaAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight)
|
||||
#define ctxdmaGetMemInterMapParams(pRmResource, pParams) ctxdmaGetMemInterMapParams_DISPATCH(pRmResource, pParams)
|
||||
#define ctxdmaGetMemoryMappingDescriptor(pRmResource, ppMemDesc) ctxdmaGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc)
|
||||
#define ctxdmaSetNotificationShare(pNotifier, pNotifShare) ctxdmaSetNotificationShare_DISPATCH(pNotifier, pNotifShare)
|
||||
#define ctxdmaControl(pResource, pCallContext, pParams) ctxdmaControl_DISPATCH(pResource, pCallContext, pParams)
|
||||
#define ctxdmaControlFilter(pResource, pCallContext, pParams) ctxdmaControlFilter_DISPATCH(pResource, pCallContext, pParams)
|
||||
#define ctxdmaGetRefCount(pResource) ctxdmaGetRefCount_DISPATCH(pResource)
|
||||
#define ctxdmaUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) ctxdmaUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent)
|
||||
#define ctxdmaUnmap(pResource, pCallContext, pCpuMapping) ctxdmaUnmap_DISPATCH(pResource, pCallContext, pCpuMapping)
|
||||
#define ctxdmaCanCopy(pResource) ctxdmaCanCopy_DISPATCH(pResource)
|
||||
#define ctxdmaControl_Prologue(pResource, pCallContext, pParams) ctxdmaControl_Prologue_DISPATCH(pResource, pCallContext, pParams)
|
||||
#define ctxdmaAddAdditionalDependants(pClient, pResource, pReference) ctxdmaAddAdditionalDependants_DISPATCH(pClient, pResource, pReference)
|
||||
#define ctxdmaPreDestruct(pResource) ctxdmaPreDestruct_DISPATCH(pResource)
|
||||
#define ctxdmaGetNotificationListPtr(pNotifier) ctxdmaGetNotificationListPtr_DISPATCH(pNotifier)
|
||||
#define ctxdmaControl_Epilogue(pResource, pCallContext, pParams) ctxdmaControl_Epilogue_DISPATCH(pResource, pCallContext, pParams)
|
||||
#define ctxdmaGetNotificationShare(pNotifier) ctxdmaGetNotificationShare_DISPATCH(pNotifier)
|
||||
#define ctxdmaControlLookup(pResource, pParams, ppEntry) ctxdmaControlLookup_DISPATCH(pResource, pParams, ppEntry)
|
||||
#define ctxdmaMap(pResource, pCallContext, pParams, pCpuMapping) ctxdmaMap_DISPATCH(pResource, pCallContext, pParams, pCpuMapping)
|
||||
#define ctxdmaGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) ctxdmaGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare)
|
||||
NV_STATUS ctxdmaValidate_IMPL(struct ContextDma *pContextDma, NvU64 start, NvU64 len);
|
||||
|
||||
static inline NV_STATUS ctxdmaValidate_DISPATCH(struct ContextDma *pContextDma, NvU64 start, NvU64 len) {
|
||||
return pContextDma->__ctxdmaValidate__(pContextDma, start, len);
|
||||
}
|
||||
|
||||
NV_STATUS ctxdmaGetKernelVA_IMPL(struct ContextDma *pContextDma, NvU64 start, NvU64 len, void **arg0, NvU32 VA_idx);
|
||||
|
||||
static inline NV_STATUS ctxdmaGetKernelVA_DISPATCH(struct ContextDma *pContextDma, NvU64 start, NvU64 len, void **arg0, NvU32 VA_idx) {
|
||||
return pContextDma->__ctxdmaGetKernelVA__(pContextDma, start, len, arg0, VA_idx);
|
||||
}
|
||||
|
||||
NV_STATUS ctxdmaMapTo_IMPL(struct ContextDma *pContextDma, struct RS_RES_MAP_TO_PARAMS *pParams);

static inline NV_STATUS ctxdmaMapTo_DISPATCH(struct ContextDma *pContextDma, struct RS_RES_MAP_TO_PARAMS *pParams) {
    return pContextDma->__ctxdmaMapTo__(pContextDma, pParams);
}

NV_STATUS ctxdmaUnmapFrom_IMPL(struct ContextDma *pContextDma, struct RS_RES_UNMAP_FROM_PARAMS *pParams);

static inline NV_STATUS ctxdmaUnmapFrom_DISPATCH(struct ContextDma *pContextDma, struct RS_RES_UNMAP_FROM_PARAMS *pParams) {
    return pContextDma->__ctxdmaUnmapFrom__(pContextDma, pParams);
}

NV_STATUS ctxdmaCtrlCmdUpdateContextdma_IMPL(struct ContextDma *pContextDma, NV0002_CTRL_UPDATE_CONTEXTDMA_PARAMS *pUpdateCtxtDmaParams);

static inline NV_STATUS ctxdmaCtrlCmdUpdateContextdma_DISPATCH(struct ContextDma *pContextDma, NV0002_CTRL_UPDATE_CONTEXTDMA_PARAMS *pUpdateCtxtDmaParams) {
    return pContextDma->__ctxdmaCtrlCmdUpdateContextdma__(pContextDma, pUpdateCtxtDmaParams);
}

NV_STATUS ctxdmaCtrlCmdBindContextdma_IMPL(struct ContextDma *pContextDma, NV0002_CTRL_BIND_CONTEXTDMA_PARAMS *pBindCtxtDmaParams);

static inline NV_STATUS ctxdmaCtrlCmdBindContextdma_DISPATCH(struct ContextDma *pContextDma, NV0002_CTRL_BIND_CONTEXTDMA_PARAMS *pBindCtxtDmaParams) {
    return pContextDma->__ctxdmaCtrlCmdBindContextdma__(pContextDma, pBindCtxtDmaParams);
}

NV_STATUS ctxdmaCtrlCmdUnbindContextdma_IMPL(struct ContextDma *pContextDma, NV0002_CTRL_UNBIND_CONTEXTDMA_PARAMS *pUnbindCtxtDmaParams);

static inline NV_STATUS ctxdmaCtrlCmdUnbindContextdma_DISPATCH(struct ContextDma *pContextDma, NV0002_CTRL_UNBIND_CONTEXTDMA_PARAMS *pUnbindCtxtDmaParams) {
    return pContextDma->__ctxdmaCtrlCmdUnbindContextdma__(pContextDma, pUnbindCtxtDmaParams);
}

static inline NvBool ctxdmaShareCallback_DISPATCH(struct ContextDma *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
    return pResource->__ctxdmaShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy);
}

static inline NV_STATUS ctxdmaCheckMemInterUnmap_DISPATCH(struct ContextDma *pRmResource, NvBool bSubdeviceHandleProvided) {
    return pRmResource->__ctxdmaCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided);
}

static inline NvBool ctxdmaAccessCallback_DISPATCH(struct ContextDma *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
    return pResource->__ctxdmaAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight);
}

static inline NV_STATUS ctxdmaGetMemInterMapParams_DISPATCH(struct ContextDma *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
    return pRmResource->__ctxdmaGetMemInterMapParams__(pRmResource, pParams);
}

static inline NV_STATUS ctxdmaGetMemoryMappingDescriptor_DISPATCH(struct ContextDma *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
    return pRmResource->__ctxdmaGetMemoryMappingDescriptor__(pRmResource, ppMemDesc);
}

static inline void ctxdmaSetNotificationShare_DISPATCH(struct ContextDma *pNotifier, struct NotifShare *pNotifShare) {
    pNotifier->__ctxdmaSetNotificationShare__(pNotifier, pNotifShare);
}

static inline NV_STATUS ctxdmaControl_DISPATCH(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return pResource->__ctxdmaControl__(pResource, pCallContext, pParams);
}

static inline NV_STATUS ctxdmaControlFilter_DISPATCH(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return pResource->__ctxdmaControlFilter__(pResource, pCallContext, pParams);
}

static inline NvU32 ctxdmaGetRefCount_DISPATCH(struct ContextDma *pResource) {
    return pResource->__ctxdmaGetRefCount__(pResource);
}

static inline NV_STATUS ctxdmaUnregisterEvent_DISPATCH(struct ContextDma *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) {
    return pNotifier->__ctxdmaUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent);
}

static inline NV_STATUS ctxdmaUnmap_DISPATCH(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) {
    return pResource->__ctxdmaUnmap__(pResource, pCallContext, pCpuMapping);
}

static inline NvBool ctxdmaCanCopy_DISPATCH(struct ContextDma *pResource) {
    return pResource->__ctxdmaCanCopy__(pResource);
}

static inline NV_STATUS ctxdmaControl_Prologue_DISPATCH(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return pResource->__ctxdmaControl_Prologue__(pResource, pCallContext, pParams);
}

static inline void ctxdmaAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct ContextDma *pResource, RsResourceRef *pReference) {
    pResource->__ctxdmaAddAdditionalDependants__(pClient, pResource, pReference);
}

static inline void ctxdmaPreDestruct_DISPATCH(struct ContextDma *pResource) {
    pResource->__ctxdmaPreDestruct__(pResource);
}

static inline PEVENTNOTIFICATION *ctxdmaGetNotificationListPtr_DISPATCH(struct ContextDma *pNotifier) {
    return pNotifier->__ctxdmaGetNotificationListPtr__(pNotifier);
}

static inline void ctxdmaControl_Epilogue_DISPATCH(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    pResource->__ctxdmaControl_Epilogue__(pResource, pCallContext, pParams);
}

static inline struct NotifShare *ctxdmaGetNotificationShare_DISPATCH(struct ContextDma *pNotifier) {
    return pNotifier->__ctxdmaGetNotificationShare__(pNotifier);
}

static inline NV_STATUS ctxdmaControlLookup_DISPATCH(struct ContextDma *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) {
    return pResource->__ctxdmaControlLookup__(pResource, pParams, ppEntry);
}

static inline NV_STATUS ctxdmaMap_DISPATCH(struct ContextDma *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) {
    return pResource->__ctxdmaMap__(pResource, pCallContext, pParams, pCpuMapping);
}

static inline NV_STATUS ctxdmaGetOrAllocNotifShare_DISPATCH(struct ContextDma *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) {
    return pNotifier->__ctxdmaGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare);
}
NV_STATUS ctxdmaConstruct_IMPL(struct ContextDma *arg_pCtxdma, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams);

#define __nvoc_ctxdmaConstruct(arg_pCtxdma, arg_pCallContext, arg_pParams) ctxdmaConstruct_IMPL(arg_pCtxdma, arg_pCallContext, arg_pParams)

void ctxdmaDestruct_IMPL(struct ContextDma *pCtxdma);

#define __nvoc_ctxdmaDestruct(pCtxdma) ctxdmaDestruct_IMPL(pCtxdma)

NvBool ctxdmaIsBound_IMPL(struct ContextDma *pContextDma);

#ifdef __nvoc_context_dma_h_disabled
static inline NvBool ctxdmaIsBound(struct ContextDma *pContextDma) {
    NV_ASSERT_FAILED_PRECOMP("ContextDma was disabled!");
    return NV_FALSE;
}
#else //__nvoc_context_dma_h_disabled
#define ctxdmaIsBound(pContextDma) ctxdmaIsBound_IMPL(pContextDma)
#endif //__nvoc_context_dma_h_disabled
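/*
 * Editorial note: when __nvoc_context_dma_h_disabled is defined, non-virtual
 * helpers such as ctxdmaIsBound compile to assert-and-fail stubs instead of
 * forwarding to their _IMPL functions, so callers still link while the class
 * is configured out.
 */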
NV_STATUS ctxdmaGetByHandle_IMPL(struct RsClient *pClient, NvHandle hContextDma, struct ContextDma **arg0);

#define ctxdmaGetByHandle(pClient, hContextDma, arg0) ctxdmaGetByHandle_IMPL(pClient, hContextDma, arg0)

#undef PRIVATE_FIELD
// ****************************************************************************
// Deprecated Definitions
// ****************************************************************************

#if RM_STRICT_CONFIG_EMIT_DEPRECATED_CONTEXT_DMA_DEFINITIONS == 1

/**
 * @warning This function is deprecated! Please use ctxdmaGetByHandle.
 */
NV_STATUS CliGetContextDma(NvHandle hClient, NvHandle hContextDma, struct ContextDma **);

#endif

#endif /* CONTEXT_DMA_H */

#ifdef __cplusplus
} // extern "C"
#endif

#endif // _G_CONTEXT_DMA_NVOC_H_
@@ -1,286 +0,0 @@
#define NVOC_DCE_CLIENT_H_PRIVATE_ACCESS_ALLOWED
#include "nvoc/runtime.h"
#include "nvoc/rtti.h"
#include "nvtypes.h"
#include "nvport/nvport.h"
#include "nvport/inline/util_valist.h"
#include "utils/nvassert.h"
#include "g_dce_client_nvoc.h"

#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0x61649c = 1;
#endif

extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJDCECLIENTRM;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJENGSTATE;

void __nvoc_init_OBJDCECLIENTRM(OBJDCECLIENTRM*);
void __nvoc_init_funcTable_OBJDCECLIENTRM(OBJDCECLIENTRM*);
NV_STATUS __nvoc_ctor_OBJDCECLIENTRM(OBJDCECLIENTRM*);
void __nvoc_init_dataField_OBJDCECLIENTRM(OBJDCECLIENTRM*);
void __nvoc_dtor_OBJDCECLIENTRM(OBJDCECLIENTRM*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJDCECLIENTRM;
static const struct NVOC_RTTI __nvoc_rtti_OBJDCECLIENTRM_OBJDCECLIENTRM = {
    /*pClassDef=*/ &__nvoc_class_def_OBJDCECLIENTRM,
    /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJDCECLIENTRM,
    /*offset=*/ 0,
};

static const struct NVOC_RTTI __nvoc_rtti_OBJDCECLIENTRM_Object = {
    /*pClassDef=*/ &__nvoc_class_def_Object,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(OBJDCECLIENTRM, __nvoc_base_OBJENGSTATE.__nvoc_base_Object),
};

static const struct NVOC_RTTI __nvoc_rtti_OBJDCECLIENTRM_OBJENGSTATE = {
    /*pClassDef=*/ &__nvoc_class_def_OBJENGSTATE,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(OBJDCECLIENTRM, __nvoc_base_OBJENGSTATE),
};

static const struct NVOC_CASTINFO __nvoc_castinfo_OBJDCECLIENTRM = {
    /*numRelatives=*/ 3,
    /*relatives=*/ {
        &__nvoc_rtti_OBJDCECLIENTRM_OBJDCECLIENTRM,
        &__nvoc_rtti_OBJDCECLIENTRM_OBJENGSTATE,
        &__nvoc_rtti_OBJDCECLIENTRM_Object,
    },
};
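/*
 * Editorial note: each NVOC_RTTI entry records a base class's byte offset
 * inside the derived object, and __nvoc_dynamicCast walks this cast table to
 * convert between OBJDCECLIENTRM, OBJENGSTATE, and Object pointers at
 * runtime.
 */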
const struct NVOC_CLASS_DEF __nvoc_class_def_OBJDCECLIENTRM =
{
    /*classInfo=*/ {
        /*size=*/ sizeof(OBJDCECLIENTRM),
        /*classId=*/ classId(OBJDCECLIENTRM),
        /*providerId=*/ &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
        /*name=*/ "OBJDCECLIENTRM",
#endif
    },
    /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_OBJDCECLIENTRM,
    /*pCastInfo=*/ &__nvoc_castinfo_OBJDCECLIENTRM,
    /*pExportInfo=*/ &__nvoc_export_info_OBJDCECLIENTRM
};
static NV_STATUS __nvoc_thunk_OBJDCECLIENTRM_engstateConstructEngine(struct OBJGPU *arg0, struct OBJENGSTATE *arg1, ENGDESCRIPTOR arg2) {
    return dceclientConstructEngine(arg0, (struct OBJDCECLIENTRM *)(((unsigned char *)arg1) - __nvoc_rtti_OBJDCECLIENTRM_OBJENGSTATE.offset), arg2);
}
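/*
 * Editorial note: the thunks bridge the two vtables. Down-thunks like the one
 * above subtract the base-class offset to recover the OBJDCECLIENTRM pointer
 * when called through an OBJENGSTATE slot; the up-thunks below add the offset
 * to forward inherited methods to the OBJENGSTATE implementations.
 */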
static void __nvoc_thunk_OBJDCECLIENTRM_engstateStateDestroy(struct OBJGPU *arg0, struct OBJENGSTATE *arg1) {
    dceclientStateDestroy(arg0, (struct OBJDCECLIENTRM *)(((unsigned char *)arg1) - __nvoc_rtti_OBJDCECLIENTRM_OBJENGSTATE.offset));
}

static NV_STATUS __nvoc_thunk_OBJDCECLIENTRM_engstateStateLoad(struct OBJGPU *arg0, struct OBJENGSTATE *arg1, NvU32 arg2) {
    return dceclientStateLoad(arg0, (struct OBJDCECLIENTRM *)(((unsigned char *)arg1) - __nvoc_rtti_OBJDCECLIENTRM_OBJENGSTATE.offset), arg2);
}

static NV_STATUS __nvoc_thunk_OBJDCECLIENTRM_engstateStateUnload(struct OBJGPU *arg0, struct OBJENGSTATE *arg1, NvU32 arg2) {
    return dceclientStateUnload(arg0, (struct OBJDCECLIENTRM *)(((unsigned char *)arg1) - __nvoc_rtti_OBJDCECLIENTRM_OBJENGSTATE.offset), arg2);
}

static NV_STATUS __nvoc_thunk_OBJENGSTATE_dceclientReconcileTunableState(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, void *pTunableState) {
    return engstateReconcileTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJDCECLIENTRM_OBJENGSTATE.offset), pTunableState);
}

static NV_STATUS __nvoc_thunk_OBJENGSTATE_dceclientStateInitLocked(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate) {
    return engstateStateInitLocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJDCECLIENTRM_OBJENGSTATE.offset));
}

static NV_STATUS __nvoc_thunk_OBJENGSTATE_dceclientStatePreLoad(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, NvU32 arg0) {
    return engstateStatePreLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJDCECLIENTRM_OBJENGSTATE.offset), arg0);
}

static NV_STATUS __nvoc_thunk_OBJENGSTATE_dceclientStatePostUnload(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, NvU32 arg0) {
    return engstateStatePostUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJDCECLIENTRM_OBJENGSTATE.offset), arg0);
}

static NV_STATUS __nvoc_thunk_OBJENGSTATE_dceclientStatePreUnload(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, NvU32 arg0) {
    return engstateStatePreUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJDCECLIENTRM_OBJENGSTATE.offset), arg0);
}

static NV_STATUS __nvoc_thunk_OBJENGSTATE_dceclientStateInitUnlocked(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate) {
    return engstateStateInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJDCECLIENTRM_OBJENGSTATE.offset));
}

static void __nvoc_thunk_OBJENGSTATE_dceclientInitMissing(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate) {
    engstateInitMissing(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJDCECLIENTRM_OBJENGSTATE.offset));
}

static NV_STATUS __nvoc_thunk_OBJENGSTATE_dceclientStatePreInitLocked(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate) {
    return engstateStatePreInitLocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJDCECLIENTRM_OBJENGSTATE.offset));
}

static NV_STATUS __nvoc_thunk_OBJENGSTATE_dceclientStatePreInitUnlocked(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate) {
    return engstateStatePreInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJDCECLIENTRM_OBJENGSTATE.offset));
}

static NV_STATUS __nvoc_thunk_OBJENGSTATE_dceclientGetTunableState(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, void *pTunableState) {
    return engstateGetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJDCECLIENTRM_OBJENGSTATE.offset), pTunableState);
}

static NV_STATUS __nvoc_thunk_OBJENGSTATE_dceclientCompareTunableState(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, void *pTunables1, void *pTunables2) {
    return engstateCompareTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJDCECLIENTRM_OBJENGSTATE.offset), pTunables1, pTunables2);
}

static void __nvoc_thunk_OBJENGSTATE_dceclientFreeTunableState(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, void *pTunableState) {
    engstateFreeTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJDCECLIENTRM_OBJENGSTATE.offset), pTunableState);
}

static NV_STATUS __nvoc_thunk_OBJENGSTATE_dceclientStatePostLoad(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, NvU32 arg0) {
    return engstateStatePostLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJDCECLIENTRM_OBJENGSTATE.offset), arg0);
}

static NV_STATUS __nvoc_thunk_OBJENGSTATE_dceclientAllocTunableState(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, void **ppTunableState) {
    return engstateAllocTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJDCECLIENTRM_OBJENGSTATE.offset), ppTunableState);
}

static NV_STATUS __nvoc_thunk_OBJENGSTATE_dceclientSetTunableState(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, void *pTunableState) {
    return engstateSetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJDCECLIENTRM_OBJENGSTATE.offset), pTunableState);
}

static NvBool __nvoc_thunk_OBJENGSTATE_dceclientIsPresent(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate) {
    return engstateIsPresent(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJDCECLIENTRM_OBJENGSTATE.offset));
}
const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJDCECLIENTRM =
{
    /*numEntries=*/ 0,
    /*pExportEntries=*/ 0
};
void __nvoc_dtor_OBJENGSTATE(OBJENGSTATE*);
void __nvoc_dtor_OBJDCECLIENTRM(OBJDCECLIENTRM *pThis) {
    __nvoc_dtor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE);
    PORT_UNREFERENCED_VARIABLE(pThis);
}

void __nvoc_init_dataField_OBJDCECLIENTRM(OBJDCECLIENTRM *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);
}

NV_STATUS __nvoc_ctor_OBJENGSTATE(OBJENGSTATE* );
NV_STATUS __nvoc_ctor_OBJDCECLIENTRM(OBJDCECLIENTRM *pThis) {
    NV_STATUS status = NV_OK;
    status = __nvoc_ctor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE);
    if (status != NV_OK) goto __nvoc_ctor_OBJDCECLIENTRM_fail_OBJENGSTATE;
    __nvoc_init_dataField_OBJDCECLIENTRM(pThis);
    goto __nvoc_ctor_OBJDCECLIENTRM_exit; // Success

__nvoc_ctor_OBJDCECLIENTRM_fail_OBJENGSTATE:
__nvoc_ctor_OBJDCECLIENTRM_exit:

    return status;
}
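/*
 * Editorial note: constructors run base-first with goto-based unwinding; if
 * the OBJENGSTATE ctor fails, control jumps past the derived-class
 * initialization and the error status is returned to the caller.
 */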
static void __nvoc_init_funcTable_OBJDCECLIENTRM_1(OBJDCECLIENTRM *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);

    pThis->__dceclientConstructEngine__ = &dceclientConstructEngine_IMPL;

    pThis->__dceclientStateDestroy__ = &dceclientStateDestroy_IMPL;

    pThis->__dceclientStateLoad__ = &dceclientStateLoad_IMPL;

    pThis->__dceclientStateUnload__ = &dceclientStateUnload_IMPL;

    pThis->__nvoc_base_OBJENGSTATE.__engstateConstructEngine__ = &__nvoc_thunk_OBJDCECLIENTRM_engstateConstructEngine;

    pThis->__nvoc_base_OBJENGSTATE.__engstateStateDestroy__ = &__nvoc_thunk_OBJDCECLIENTRM_engstateStateDestroy;

    pThis->__nvoc_base_OBJENGSTATE.__engstateStateLoad__ = &__nvoc_thunk_OBJDCECLIENTRM_engstateStateLoad;

    pThis->__nvoc_base_OBJENGSTATE.__engstateStateUnload__ = &__nvoc_thunk_OBJDCECLIENTRM_engstateStateUnload;

    pThis->__dceclientReconcileTunableState__ = &__nvoc_thunk_OBJENGSTATE_dceclientReconcileTunableState;

    pThis->__dceclientStateInitLocked__ = &__nvoc_thunk_OBJENGSTATE_dceclientStateInitLocked;

    pThis->__dceclientStatePreLoad__ = &__nvoc_thunk_OBJENGSTATE_dceclientStatePreLoad;

    pThis->__dceclientStatePostUnload__ = &__nvoc_thunk_OBJENGSTATE_dceclientStatePostUnload;

    pThis->__dceclientStatePreUnload__ = &__nvoc_thunk_OBJENGSTATE_dceclientStatePreUnload;

    pThis->__dceclientStateInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_dceclientStateInitUnlocked;

    pThis->__dceclientInitMissing__ = &__nvoc_thunk_OBJENGSTATE_dceclientInitMissing;

    pThis->__dceclientStatePreInitLocked__ = &__nvoc_thunk_OBJENGSTATE_dceclientStatePreInitLocked;

    pThis->__dceclientStatePreInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_dceclientStatePreInitUnlocked;

    pThis->__dceclientGetTunableState__ = &__nvoc_thunk_OBJENGSTATE_dceclientGetTunableState;

    pThis->__dceclientCompareTunableState__ = &__nvoc_thunk_OBJENGSTATE_dceclientCompareTunableState;

    pThis->__dceclientFreeTunableState__ = &__nvoc_thunk_OBJENGSTATE_dceclientFreeTunableState;

    pThis->__dceclientStatePostLoad__ = &__nvoc_thunk_OBJENGSTATE_dceclientStatePostLoad;

    pThis->__dceclientAllocTunableState__ = &__nvoc_thunk_OBJENGSTATE_dceclientAllocTunableState;

    pThis->__dceclientSetTunableState__ = &__nvoc_thunk_OBJENGSTATE_dceclientSetTunableState;

    pThis->__dceclientIsPresent__ = &__nvoc_thunk_OBJENGSTATE_dceclientIsPresent;
}

void __nvoc_init_funcTable_OBJDCECLIENTRM(OBJDCECLIENTRM *pThis) {
    __nvoc_init_funcTable_OBJDCECLIENTRM_1(pThis);
}
void __nvoc_init_OBJENGSTATE(OBJENGSTATE*);
void __nvoc_init_OBJDCECLIENTRM(OBJDCECLIENTRM *pThis) {
    pThis->__nvoc_pbase_OBJDCECLIENTRM = pThis;
    pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object;
    pThis->__nvoc_pbase_OBJENGSTATE = &pThis->__nvoc_base_OBJENGSTATE;
    __nvoc_init_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE);
    __nvoc_init_funcTable_OBJDCECLIENTRM(pThis);
}
NV_STATUS __nvoc_objCreate_OBJDCECLIENTRM(OBJDCECLIENTRM **ppThis, Dynamic *pParent, NvU32 createFlags) {
    NV_STATUS status;
    Object *pParentObj;
    OBJDCECLIENTRM *pThis;

    pThis = portMemAllocNonPaged(sizeof(OBJDCECLIENTRM));
    if (pThis == NULL) return NV_ERR_NO_MEMORY;

    portMemSet(pThis, 0, sizeof(OBJDCECLIENTRM));

    __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_OBJDCECLIENTRM);

    if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
    {
        pParentObj = dynamicCast(pParent, Object);
        objAddChild(pParentObj, &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object);
    }
    else
    {
        pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object.pParent = NULL;
    }

    __nvoc_init_OBJDCECLIENTRM(pThis);
    status = __nvoc_ctor_OBJDCECLIENTRM(pThis);
    if (status != NV_OK) goto __nvoc_objCreate_OBJDCECLIENTRM_cleanup;

    *ppThis = pThis;
    return NV_OK;

__nvoc_objCreate_OBJDCECLIENTRM_cleanup:
    // do not call destructors here since the constructor already called them
    portMemFree(pThis);
    return status;
}

NV_STATUS __nvoc_objCreateDynamic_OBJDCECLIENTRM(OBJDCECLIENTRM **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
    NV_STATUS status;

    status = __nvoc_objCreate_OBJDCECLIENTRM(ppThis, pParent, createFlags);

    return status;
}
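/*
 * Editorial note: an illustrative call sequence (hypothetical caller, not
 * part of this file):
 *
 *     OBJDCECLIENTRM *pDceClient = NULL;
 *     NV_STATUS status = __nvoc_objCreate_OBJDCECLIENTRM(&pDceClient,
 *                                                        pParentDynamic, 0);
 *
 * Allocation, RTTI setup, parent linkage, vtable init, and construction all
 * happen inside the single create call; on failure the partially built
 * object is freed before the status is returned.
 */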
@@ -1,368 +0,0 @@
#ifndef _G_DCE_CLIENT_NVOC_H_
#define _G_DCE_CLIENT_NVOC_H_
#include "nvoc/runtime.h"

#ifdef __cplusplus
extern "C" {
#endif

/*
 * SPDX-FileCopyrightText: Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include "g_dce_client_nvoc.h"
|
||||
|
||||
#ifndef _DCE_CLIENT_H_
|
||||
#define _DCE_CLIENT_H_
|
||||
|
||||
/*!
|
||||
* @file dce_client.h
|
||||
* @brief Provides definitions for all DceClient data structures and interfaces.
|
||||
*/
|
||||
|
||||
#include "gpu/eng_state.h"
|
||||
#include "core/core.h"
|
||||
#include "objrpc.h"
|
||||
#include "os/dce_rm_client_ipc.h"
|
||||
#include "class/cl0000.h"
|
||||
#include "class/cl0080.h"
|
||||
#include "class/cl2080.h"
|
||||
#include "class/cl0073.h"
|
||||
#include "class/cl0005.h"
|
||||
#include "class/clc372sw.h"
|
||||
|
||||
typedef struct
{
    NvHandle hClient;
    NvHandle hParent;
    NvHandle hObject;
    NvU32 hClass;
    NV0000_ALLOC_PARAMETERS rootAllocParams;
    NvBool valid;
} ROOT;

typedef struct
{
    NvHandle hClient;
    NvHandle hParent;
    NvHandle hObject;
    NvU32 hClass;
    NV0080_ALLOC_PARAMETERS deviceAllocParams;
    NvBool valid;
} DEVICE;

typedef struct
{
    NvHandle hClient;
    NvHandle hParent;
    NvHandle hObject;
    NvU32 hClass;
    NV2080_ALLOC_PARAMETERS subdeviceAllocParams;
    NvBool valid;
} SUBDEVICE;

typedef struct
{
    NvHandle hClient;
    NvHandle hParent;
    NvHandle hObject;
    NvU32 hClass;
    NVOS21_PARAMETERS displayCommonAllocParams;
    NvBool valid;
} DISPLAY_COMMON;

typedef struct
{
    NvHandle hClient;
    NvHandle hParent;
    NvHandle hObject;
    NvU32 hClass;
    NVOS21_PARAMETERS displaySWAllocParams;
    NvBool valid;
} DISPLAY_SW;

typedef struct
{
    NvHandle hClient;
    NvHandle hParent;
    NvHandle hObject;
    NvU32 hClass;
    NV0005_ALLOC_PARAMETERS displaySWEventAllocParams;
    NvBool valid;
} DISPLAY_SW_EVENT;

typedef struct
{
    NvHandle hClient;
    NvHandle hObject;
    NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS setEventParams;
    NvBool valid;
} DISPLAY_HPD_CTRL;
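/*
 * Editorial note: each structure above is bookkeeping for one RM object the
 * DCE client allocates, recording the client/parent/object handles, the
 * class, the allocation parameters, and a valid flag marking whether the
 * entry is currently in use.
 */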
/*!
 * Max no of RM clients
 */
#define MAX_RM_CLIENTS 5

/*!
 * Temporary alias of DceClient to OBJDCECLIENTRM
 */
#define DceClient OBJDCECLIENTRM

/*!
 * Defines the structure used to contain all generic information related to
 * the DceClient.
 */
#ifdef NVOC_DCE_CLIENT_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
struct OBJDCECLIENTRM {
    const struct NVOC_RTTI *__nvoc_rtti;
    struct OBJENGSTATE __nvoc_base_OBJENGSTATE;
    struct Object *__nvoc_pbase_Object;
    struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE;
    struct OBJDCECLIENTRM *__nvoc_pbase_OBJDCECLIENTRM;
    NV_STATUS (*__dceclientConstructEngine__)(struct OBJGPU *, struct OBJDCECLIENTRM *, ENGDESCRIPTOR);
    void (*__dceclientStateDestroy__)(struct OBJGPU *, struct OBJDCECLIENTRM *);
    NV_STATUS (*__dceclientStateLoad__)(struct OBJGPU *, struct OBJDCECLIENTRM *, NvU32);
    NV_STATUS (*__dceclientStateUnload__)(struct OBJGPU *, struct OBJDCECLIENTRM *, NvU32);
    NV_STATUS (*__dceclientReconcileTunableState__)(POBJGPU, struct OBJDCECLIENTRM *, void *);
    NV_STATUS (*__dceclientStateInitLocked__)(POBJGPU, struct OBJDCECLIENTRM *);
    NV_STATUS (*__dceclientStatePreLoad__)(POBJGPU, struct OBJDCECLIENTRM *, NvU32);
    NV_STATUS (*__dceclientStatePostUnload__)(POBJGPU, struct OBJDCECLIENTRM *, NvU32);
    NV_STATUS (*__dceclientStatePreUnload__)(POBJGPU, struct OBJDCECLIENTRM *, NvU32);
    NV_STATUS (*__dceclientStateInitUnlocked__)(POBJGPU, struct OBJDCECLIENTRM *);
    void (*__dceclientInitMissing__)(POBJGPU, struct OBJDCECLIENTRM *);
    NV_STATUS (*__dceclientStatePreInitLocked__)(POBJGPU, struct OBJDCECLIENTRM *);
    NV_STATUS (*__dceclientStatePreInitUnlocked__)(POBJGPU, struct OBJDCECLIENTRM *);
    NV_STATUS (*__dceclientGetTunableState__)(POBJGPU, struct OBJDCECLIENTRM *, void *);
    NV_STATUS (*__dceclientCompareTunableState__)(POBJGPU, struct OBJDCECLIENTRM *, void *, void *);
    void (*__dceclientFreeTunableState__)(POBJGPU, struct OBJDCECLIENTRM *, void *);
    NV_STATUS (*__dceclientStatePostLoad__)(POBJGPU, struct OBJDCECLIENTRM *, NvU32);
    NV_STATUS (*__dceclientAllocTunableState__)(POBJGPU, struct OBJDCECLIENTRM *, void **);
    NV_STATUS (*__dceclientSetTunableState__)(POBJGPU, struct OBJDCECLIENTRM *, void *);
    NvBool (*__dceclientIsPresent__)(POBJGPU, struct OBJDCECLIENTRM *);
    struct OBJRPC *pRpc;
    NvU32 clientId[2];
};
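/*
 * Editorial layout note: the RTTI pointer comes first, the OBJENGSTATE base
 * is embedded by value (so base-class casts reduce to pointer arithmetic),
 * the __nvoc_pbase_* pointers give O(1) upcasts, and the per-object function
 * pointers form the vtable that __nvoc_init_funcTable_OBJDCECLIENTRM fills
 * in.
 */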
#ifndef __NVOC_CLASS_OBJDCECLIENTRM_TYPEDEF__
#define __NVOC_CLASS_OBJDCECLIENTRM_TYPEDEF__
typedef struct OBJDCECLIENTRM OBJDCECLIENTRM;
#endif /* __NVOC_CLASS_OBJDCECLIENTRM_TYPEDEF__ */

#ifndef __nvoc_class_id_OBJDCECLIENTRM
#define __nvoc_class_id_OBJDCECLIENTRM 0x61649c
#endif /* __nvoc_class_id_OBJDCECLIENTRM */

extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJDCECLIENTRM;

#define __staticCast_OBJDCECLIENTRM(pThis) \
    ((pThis)->__nvoc_pbase_OBJDCECLIENTRM)

#ifdef __nvoc_dce_client_h_disabled
#define __dynamicCast_OBJDCECLIENTRM(pThis) ((OBJDCECLIENTRM*)NULL)
#else //__nvoc_dce_client_h_disabled
#define __dynamicCast_OBJDCECLIENTRM(pThis) \
    ((OBJDCECLIENTRM*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJDCECLIENTRM)))
#endif //__nvoc_dce_client_h_disabled

#define PDB_PROP_DCECLIENT_IS_MISSING_BASE_CAST __nvoc_base_OBJENGSTATE.
#define PDB_PROP_DCECLIENT_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING

NV_STATUS __nvoc_objCreateDynamic_OBJDCECLIENTRM(OBJDCECLIENTRM**, Dynamic*, NvU32, va_list);

NV_STATUS __nvoc_objCreate_OBJDCECLIENTRM(OBJDCECLIENTRM**, Dynamic*, NvU32);
#define __objCreate_OBJDCECLIENTRM(ppNewObj, pParent, createFlags) \
    __nvoc_objCreate_OBJDCECLIENTRM((ppNewObj), staticCast((pParent), Dynamic), (createFlags))
#define dceclientConstructEngine(arg0, arg1, arg2) dceclientConstructEngine_DISPATCH(arg0, arg1, arg2)
#define dceclientStateDestroy(arg0, arg1) dceclientStateDestroy_DISPATCH(arg0, arg1)
#define dceclientStateLoad(arg0, arg1, arg2) dceclientStateLoad_DISPATCH(arg0, arg1, arg2)
#define dceclientStateUnload(arg0, arg1, arg2) dceclientStateUnload_DISPATCH(arg0, arg1, arg2)
#define dceclientReconcileTunableState(pGpu, pEngstate, pTunableState) dceclientReconcileTunableState_DISPATCH(pGpu, pEngstate, pTunableState)
#define dceclientStateInitLocked(pGpu, pEngstate) dceclientStateInitLocked_DISPATCH(pGpu, pEngstate)
#define dceclientStatePreLoad(pGpu, pEngstate, arg0) dceclientStatePreLoad_DISPATCH(pGpu, pEngstate, arg0)
#define dceclientStatePostUnload(pGpu, pEngstate, arg0) dceclientStatePostUnload_DISPATCH(pGpu, pEngstate, arg0)
#define dceclientStatePreUnload(pGpu, pEngstate, arg0) dceclientStatePreUnload_DISPATCH(pGpu, pEngstate, arg0)
#define dceclientStateInitUnlocked(pGpu, pEngstate) dceclientStateInitUnlocked_DISPATCH(pGpu, pEngstate)
#define dceclientInitMissing(pGpu, pEngstate) dceclientInitMissing_DISPATCH(pGpu, pEngstate)
#define dceclientStatePreInitLocked(pGpu, pEngstate) dceclientStatePreInitLocked_DISPATCH(pGpu, pEngstate)
#define dceclientStatePreInitUnlocked(pGpu, pEngstate) dceclientStatePreInitUnlocked_DISPATCH(pGpu, pEngstate)
#define dceclientGetTunableState(pGpu, pEngstate, pTunableState) dceclientGetTunableState_DISPATCH(pGpu, pEngstate, pTunableState)
#define dceclientCompareTunableState(pGpu, pEngstate, pTunables1, pTunables2) dceclientCompareTunableState_DISPATCH(pGpu, pEngstate, pTunables1, pTunables2)
#define dceclientFreeTunableState(pGpu, pEngstate, pTunableState) dceclientFreeTunableState_DISPATCH(pGpu, pEngstate, pTunableState)
#define dceclientStatePostLoad(pGpu, pEngstate, arg0) dceclientStatePostLoad_DISPATCH(pGpu, pEngstate, arg0)
#define dceclientAllocTunableState(pGpu, pEngstate, ppTunableState) dceclientAllocTunableState_DISPATCH(pGpu, pEngstate, ppTunableState)
#define dceclientSetTunableState(pGpu, pEngstate, pTunableState) dceclientSetTunableState_DISPATCH(pGpu, pEngstate, pTunableState)
#define dceclientIsPresent(pGpu, pEngstate) dceclientIsPresent_DISPATCH(pGpu, pEngstate)
NV_STATUS dceclientConstructEngine_IMPL(struct OBJGPU *arg0, struct OBJDCECLIENTRM *arg1, ENGDESCRIPTOR arg2);

static inline NV_STATUS dceclientConstructEngine_DISPATCH(struct OBJGPU *arg0, struct OBJDCECLIENTRM *arg1, ENGDESCRIPTOR arg2) {
    return arg1->__dceclientConstructEngine__(arg0, arg1, arg2);
}

void dceclientStateDestroy_IMPL(struct OBJGPU *arg0, struct OBJDCECLIENTRM *arg1);

static inline void dceclientStateDestroy_DISPATCH(struct OBJGPU *arg0, struct OBJDCECLIENTRM *arg1) {
    arg1->__dceclientStateDestroy__(arg0, arg1);
}

NV_STATUS dceclientStateLoad_IMPL(struct OBJGPU *arg0, struct OBJDCECLIENTRM *arg1, NvU32 arg2);

static inline NV_STATUS dceclientStateLoad_DISPATCH(struct OBJGPU *arg0, struct OBJDCECLIENTRM *arg1, NvU32 arg2) {
    return arg1->__dceclientStateLoad__(arg0, arg1, arg2);
}

NV_STATUS dceclientStateUnload_IMPL(struct OBJGPU *arg0, struct OBJDCECLIENTRM *arg1, NvU32 arg2);

static inline NV_STATUS dceclientStateUnload_DISPATCH(struct OBJGPU *arg0, struct OBJDCECLIENTRM *arg1, NvU32 arg2) {
    return arg1->__dceclientStateUnload__(arg0, arg1, arg2);
}

static inline NV_STATUS dceclientReconcileTunableState_DISPATCH(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, void *pTunableState) {
    return pEngstate->__dceclientReconcileTunableState__(pGpu, pEngstate, pTunableState);
}

static inline NV_STATUS dceclientStateInitLocked_DISPATCH(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate) {
    return pEngstate->__dceclientStateInitLocked__(pGpu, pEngstate);
}

static inline NV_STATUS dceclientStatePreLoad_DISPATCH(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, NvU32 arg0) {
    return pEngstate->__dceclientStatePreLoad__(pGpu, pEngstate, arg0);
}

static inline NV_STATUS dceclientStatePostUnload_DISPATCH(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, NvU32 arg0) {
    return pEngstate->__dceclientStatePostUnload__(pGpu, pEngstate, arg0);
}

static inline NV_STATUS dceclientStatePreUnload_DISPATCH(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, NvU32 arg0) {
    return pEngstate->__dceclientStatePreUnload__(pGpu, pEngstate, arg0);
}

static inline NV_STATUS dceclientStateInitUnlocked_DISPATCH(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate) {
    return pEngstate->__dceclientStateInitUnlocked__(pGpu, pEngstate);
}

static inline void dceclientInitMissing_DISPATCH(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate) {
    pEngstate->__dceclientInitMissing__(pGpu, pEngstate);
}

static inline NV_STATUS dceclientStatePreInitLocked_DISPATCH(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate) {
    return pEngstate->__dceclientStatePreInitLocked__(pGpu, pEngstate);
}

static inline NV_STATUS dceclientStatePreInitUnlocked_DISPATCH(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate) {
    return pEngstate->__dceclientStatePreInitUnlocked__(pGpu, pEngstate);
}

static inline NV_STATUS dceclientGetTunableState_DISPATCH(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, void *pTunableState) {
    return pEngstate->__dceclientGetTunableState__(pGpu, pEngstate, pTunableState);
}

static inline NV_STATUS dceclientCompareTunableState_DISPATCH(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, void *pTunables1, void *pTunables2) {
    return pEngstate->__dceclientCompareTunableState__(pGpu, pEngstate, pTunables1, pTunables2);
}

static inline void dceclientFreeTunableState_DISPATCH(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, void *pTunableState) {
    pEngstate->__dceclientFreeTunableState__(pGpu, pEngstate, pTunableState);
}

static inline NV_STATUS dceclientStatePostLoad_DISPATCH(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, NvU32 arg0) {
    return pEngstate->__dceclientStatePostLoad__(pGpu, pEngstate, arg0);
}

static inline NV_STATUS dceclientAllocTunableState_DISPATCH(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, void **ppTunableState) {
    return pEngstate->__dceclientAllocTunableState__(pGpu, pEngstate, ppTunableState);
}

static inline NV_STATUS dceclientSetTunableState_DISPATCH(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate, void *pTunableState) {
    return pEngstate->__dceclientSetTunableState__(pGpu, pEngstate, pTunableState);
}

static inline NvBool dceclientIsPresent_DISPATCH(POBJGPU pGpu, struct OBJDCECLIENTRM *pEngstate) {
    return pEngstate->__dceclientIsPresent__(pGpu, pEngstate);
}
NV_STATUS dceclientInitRpcInfra_IMPL(struct OBJGPU *arg0, struct OBJDCECLIENTRM *arg1);
#ifdef __nvoc_dce_client_h_disabled
static inline NV_STATUS dceclientInitRpcInfra(struct OBJGPU *arg0, struct OBJDCECLIENTRM *arg1) {
    NV_ASSERT_FAILED_PRECOMP("OBJDCECLIENTRM was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_dce_client_h_disabled
#define dceclientInitRpcInfra(arg0, arg1) dceclientInitRpcInfra_IMPL(arg0, arg1)
#endif //__nvoc_dce_client_h_disabled

void dceclientDeinitRpcInfra_IMPL(struct OBJDCECLIENTRM *arg0);
#ifdef __nvoc_dce_client_h_disabled
static inline void dceclientDeinitRpcInfra(struct OBJDCECLIENTRM *arg0) {
    NV_ASSERT_FAILED_PRECOMP("OBJDCECLIENTRM was disabled!");
}
#else //__nvoc_dce_client_h_disabled
#define dceclientDeinitRpcInfra(arg0) dceclientDeinitRpcInfra_IMPL(arg0)
#endif //__nvoc_dce_client_h_disabled

NV_STATUS dceclientDceRmInit_IMPL(struct OBJGPU *arg0, struct OBJDCECLIENTRM *arg1, NvBool arg2);
#ifdef __nvoc_dce_client_h_disabled
static inline NV_STATUS dceclientDceRmInit(struct OBJGPU *arg0, struct OBJDCECLIENTRM *arg1, NvBool arg2) {
    NV_ASSERT_FAILED_PRECOMP("OBJDCECLIENTRM was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_dce_client_h_disabled
#define dceclientDceRmInit(arg0, arg1, arg2) dceclientDceRmInit_IMPL(arg0, arg1, arg2)
#endif //__nvoc_dce_client_h_disabled

NV_STATUS dceclientSendRpc_IMPL(struct OBJDCECLIENTRM *arg0, void *arg1, NvU32 arg2);
#ifdef __nvoc_dce_client_h_disabled
static inline NV_STATUS dceclientSendRpc(struct OBJDCECLIENTRM *arg0, void *arg1, NvU32 arg2) {
    NV_ASSERT_FAILED_PRECOMP("OBJDCECLIENTRM was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_dce_client_h_disabled
#define dceclientSendRpc(arg0, arg1, arg2) dceclientSendRpc_IMPL(arg0, arg1, arg2)
#endif //__nvoc_dce_client_h_disabled

#undef PRIVATE_FIELD
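/*
 * Editorial note: the rpcRmApi*_dce entry points declared below marshal RM
 * API operations (control, alloc, dup, free, init) over the DCE RPC channel;
 * judging by their names they are plain C functions rather than NVOC virtual
 * methods.
 */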
NV_STATUS rpcRmApiControl_dce(RM_API *pRmApi,
                              NvHandle hClient, NvHandle hObject,
                              NvU32 cmd, void *pParamStructPtr,
                              NvU32 paramsSize);
NV_STATUS rpcRmApiAlloc_dce(RM_API *pRmApi,
                            NvHandle hClient, NvHandle hParent,
                            NvHandle hObject, NvU32 hClass,
                            void *pAllocParams);
NV_STATUS rpcRmApiDupObject_dce(RM_API *pRmApi, NvHandle hClient,
                                NvHandle hParent, NvHandle *phObject, NvHandle hClientSrc,
                                NvHandle hObjectSrc, NvU32 flags);
NV_STATUS rpcRmApiFree_dce(RM_API *pRmApi, NvHandle hClient, NvHandle hObject);
NV_STATUS rpcDceRmInit_dce(RM_API *pRmApi, NvBool bInit);
#endif

#ifdef __cplusplus
} // extern "C"
#endif
#endif // _G_DCE_CLIENT_NVOC_H_
@@ -1,550 +0,0 @@
#define NVOC_DEVICE_H_PRIVATE_ACCESS_ALLOWED
#include "nvoc/runtime.h"
#include "nvoc/rtti.h"
#include "nvtypes.h"
#include "nvport/nvport.h"
#include "nvport/inline/util_valist.h"
#include "utils/nvassert.h"
#include "g_device_nvoc.h"

#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0xe0ac20 = 1;
#endif

extern const struct NVOC_CLASS_DEF __nvoc_class_def_Device;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource;

void __nvoc_init_Device(Device*);
void __nvoc_init_funcTable_Device(Device*);
NV_STATUS __nvoc_ctor_Device(Device*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams);
void __nvoc_init_dataField_Device(Device*);
void __nvoc_dtor_Device(Device*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_Device;
static const struct NVOC_RTTI __nvoc_rtti_Device_Device = {
    /*pClassDef=*/ &__nvoc_class_def_Device,
    /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_Device,
    /*offset=*/ 0,
};

static const struct NVOC_RTTI __nvoc_rtti_Device_Object = {
    /*pClassDef=*/ &__nvoc_class_def_Object,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(Device, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object),
};

static const struct NVOC_RTTI __nvoc_rtti_Device_RsResource = {
    /*pClassDef=*/ &__nvoc_class_def_RsResource,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(Device, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource),
};

static const struct NVOC_RTTI __nvoc_rtti_Device_RmResourceCommon = {
    /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(Device, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon),
};

static const struct NVOC_RTTI __nvoc_rtti_Device_RmResource = {
    /*pClassDef=*/ &__nvoc_class_def_RmResource,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(Device, __nvoc_base_GpuResource.__nvoc_base_RmResource),
};

static const struct NVOC_RTTI __nvoc_rtti_Device_GpuResource = {
    /*pClassDef=*/ &__nvoc_class_def_GpuResource,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(Device, __nvoc_base_GpuResource),
};

static const struct NVOC_CASTINFO __nvoc_castinfo_Device = {
    /*numRelatives=*/ 6,
    /*relatives=*/ {
        &__nvoc_rtti_Device_Device,
        &__nvoc_rtti_Device_GpuResource,
        &__nvoc_rtti_Device_RmResource,
        &__nvoc_rtti_Device_RmResourceCommon,
        &__nvoc_rtti_Device_RsResource,
        &__nvoc_rtti_Device_Object,
    },
};
const struct NVOC_CLASS_DEF __nvoc_class_def_Device =
{
    /*classInfo=*/ {
        /*size=*/ sizeof(Device),
        /*classId=*/ classId(Device),
        /*providerId=*/ &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
        /*name=*/ "Device",
#endif
    },
    /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_Device,
    /*pCastInfo=*/ &__nvoc_castinfo_Device,
    /*pExportInfo=*/ &__nvoc_export_info_Device
};
static NV_STATUS __nvoc_thunk_Device_gpuresControl(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return deviceControl((struct Device *)(((unsigned char *)pResource) - __nvoc_rtti_Device_GpuResource.offset), pCallContext, pParams);
}

static NV_STATUS __nvoc_thunk_Device_gpuresInternalControlForward(struct GpuResource *pDevice, NvU32 command, void *pParams, NvU32 size) {
    return deviceInternalControlForward((struct Device *)(((unsigned char *)pDevice) - __nvoc_rtti_Device_GpuResource.offset), command, pParams, size);
}

static NvBool __nvoc_thunk_GpuResource_deviceShareCallback(struct Device *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
    return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Device_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy);
}

static NV_STATUS __nvoc_thunk_GpuResource_deviceUnmap(struct Device *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) {
    return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Device_GpuResource.offset), pCallContext, pCpuMapping);
}

static NV_STATUS __nvoc_thunk_RmResource_deviceGetMemInterMapParams(struct Device *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
    return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_Device_RmResource.offset), pParams);
}

static NV_STATUS __nvoc_thunk_RmResource_deviceGetMemoryMappingDescriptor(struct Device *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
    return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_Device_RmResource.offset), ppMemDesc);
}

static NV_STATUS __nvoc_thunk_GpuResource_deviceGetMapAddrSpace(struct Device *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) {
    return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Device_GpuResource.offset), pCallContext, mapFlags, pAddrSpace);
}

static NvHandle __nvoc_thunk_GpuResource_deviceGetInternalObjectHandle(struct Device *pGpuResource) {
    return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Device_GpuResource.offset));
}

static NV_STATUS __nvoc_thunk_RsResource_deviceControlFilter(struct Device *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Device_RsResource.offset), pCallContext, pParams);
}

static void __nvoc_thunk_RsResource_deviceAddAdditionalDependants(struct RsClient *pClient, struct Device *pResource, RsResourceRef *pReference) {
    resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Device_RsResource.offset), pReference);
}

static NvU32 __nvoc_thunk_RsResource_deviceGetRefCount(struct Device *pResource) {
    return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Device_RsResource.offset));
}

static NV_STATUS __nvoc_thunk_RmResource_deviceCheckMemInterUnmap(struct Device *pRmResource, NvBool bSubdeviceHandleProvided) {
    return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_Device_RmResource.offset), bSubdeviceHandleProvided);
}

static NV_STATUS __nvoc_thunk_RsResource_deviceMapTo(struct Device *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
    return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Device_RsResource.offset), pParams);
}

static NV_STATUS __nvoc_thunk_RmResource_deviceControl_Prologue(struct Device *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_Device_RmResource.offset), pCallContext, pParams);
}

static NV_STATUS __nvoc_thunk_GpuResource_deviceGetRegBaseOffsetAndSize(struct Device *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) {
    return gpuresGetRegBaseOffsetAndSize((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Device_GpuResource.offset), pGpu, pOffset, pSize);
}

static NvBool __nvoc_thunk_RsResource_deviceCanCopy(struct Device *pResource) {
    return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Device_RsResource.offset));
}

static void __nvoc_thunk_RsResource_devicePreDestruct(struct Device *pResource) {
    resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Device_RsResource.offset));
}

static NV_STATUS __nvoc_thunk_RsResource_deviceUnmapFrom(struct Device *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
    return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Device_RsResource.offset), pParams);
}

static void __nvoc_thunk_RmResource_deviceControl_Epilogue(struct Device *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_Device_RmResource.offset), pCallContext, pParams);
}

static NV_STATUS __nvoc_thunk_RsResource_deviceControlLookup(struct Device *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) {
    return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Device_RsResource.offset), pParams, ppEntry);
}

static NV_STATUS __nvoc_thunk_GpuResource_deviceMap(struct Device *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) {
    return gpuresMap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Device_GpuResource.offset), pCallContext, pParams, pCpuMapping);
}

static NvBool __nvoc_thunk_RmResource_deviceAccessCallback(struct Device *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
    return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_Device_RmResource.offset), pInvokingClient, pAllocParams, accessRight);
}
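/*
 * Editorial note: Device overrides only Control and InternalControlForward
 * (the two down-thunks into deviceControl/deviceInternalControlForward);
 * every other resource method is forwarded up to the GpuResource, RmResource,
 * or RsResource base implementation.
 */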
#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG)
#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0)
#endif
static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_Device[] =
{
    { /* [0] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x813u)
        /*pFunc=*/ (void (*)(void)) NULL,
#else
        /*pFunc=*/ (void (*)(void)) deviceCtrlCmdGpuGetClasslist_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x813u)
        /*flags=*/ 0x813u,
        /*accessRight=*/0x0u,
        /*methodId=*/ 0x800201u,
        /*paramSize=*/ sizeof(NV0080_CTRL_GPU_GET_CLASSLIST_PARAMS),
        /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
        /*func=*/ "deviceCtrlCmdGpuGetClasslist"
#endif
    },
    { /* [1] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u)
        /*pFunc=*/ (void (*)(void)) NULL,
#else
        /*pFunc=*/ (void (*)(void)) deviceCtrlCmdGpuGetNumSubdevices_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u)
        /*flags=*/ 0x811u,
        /*accessRight=*/0x0u,
        /*methodId=*/ 0x800280u,
        /*paramSize=*/ sizeof(NV0080_CTRL_GPU_GET_NUM_SUBDEVICES_PARAMS),
        /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
        /*func=*/ "deviceCtrlCmdGpuGetNumSubdevices"
#endif
    },
    { /* [2] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x5u)
        /*pFunc=*/ (void (*)(void)) NULL,
#else
        /*pFunc=*/ (void (*)(void)) deviceCtrlCmdGpuModifyGpuSwStatePersistence_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x5u)
        /*flags=*/ 0x5u,
        /*accessRight=*/0x0u,
        /*methodId=*/ 0x800287u,
        /*paramSize=*/ sizeof(NV0080_CTRL_GPU_MODIFY_SW_STATE_PERSISTENCE_PARAMS),
        /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
        /*func=*/ "deviceCtrlCmdGpuModifyGpuSwStatePersistence"
#endif
    },
    { /* [3] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
        /*pFunc=*/ (void (*)(void)) NULL,
#else
        /*pFunc=*/ (void (*)(void)) deviceCtrlCmdGpuQueryGpuSwStatePersistence_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
        /*flags=*/ 0x11u,
        /*accessRight=*/0x0u,
        /*methodId=*/ 0x800288u,
        /*paramSize=*/ sizeof(NV0080_CTRL_GPU_QUERY_SW_STATE_PERSISTENCE_PARAMS),
        /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
        /*func=*/ "deviceCtrlCmdGpuQueryGpuSwStatePersistence"
#endif
    },
    { /* [4] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u)
        /*pFunc=*/ (void (*)(void)) NULL,
#else
        /*pFunc=*/ (void (*)(void)) deviceCtrlCmdGpuGetVirtualizationMode_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u)
        /*flags=*/ 0x810u,
        /*accessRight=*/0x0u,
        /*methodId=*/ 0x800289u,
        /*paramSize=*/ sizeof(NV0080_CTRL_GPU_GET_VIRTUALIZATION_MODE_PARAMS),
        /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
        /*func=*/ "deviceCtrlCmdGpuGetVirtualizationMode"
#endif
    },
    { /* [5] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x813u)
        /*pFunc=*/ (void (*)(void)) NULL,
#else
        /*pFunc=*/ (void (*)(void)) deviceCtrlCmdGpuGetClasslistV2_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x813u)
        /*flags=*/ 0x813u,
        /*accessRight=*/0x0u,
        /*methodId=*/ 0x800292u,
        /*paramSize=*/ sizeof(NV0080_CTRL_GPU_GET_CLASSLIST_V2_PARAMS),
        /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
        /*func=*/ "deviceCtrlCmdGpuGetClasslistV2"
#endif
    },
    { /* [6] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x13u)
        /*pFunc=*/ (void (*)(void)) NULL,
#else
        /*pFunc=*/ (void (*)(void)) deviceCtrlCmdGpuGetFindSubDeviceHandle_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x13u)
        /*flags=*/ 0x13u,
        /*accessRight=*/0x0u,
        /*methodId=*/ 0x800293u,
        /*paramSize=*/ sizeof(NV0080_CTRL_GPU_FIND_SUBDEVICE_HANDLE_PARAM),
        /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
        /*func=*/ "deviceCtrlCmdGpuGetFindSubDeviceHandle"
#endif
    },
    { /* [7] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x211u)
        /*pFunc=*/ (void (*)(void)) NULL,
#else
        /*pFunc=*/ (void (*)(void)) deviceCtrlCmdGpuGetBrandCaps_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x211u)
        /*flags=*/ 0x211u,
        /*accessRight=*/0x0u,
        /*methodId=*/ 0x800294u,
        /*paramSize=*/ sizeof(NV0080_CTRL_GPU_GET_BRAND_CAPS_PARAMS),
        /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
        /*func=*/ "deviceCtrlCmdGpuGetBrandCaps"
#endif
    },
    { /* [8] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u)
        /*pFunc=*/ (void (*)(void)) NULL,
#else
        /*pFunc=*/ (void (*)(void)) deviceCtrlCmdGpuSetVgpuVfBar1Size_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u)
        /*flags=*/ 0x204u,
        /*accessRight=*/0x0u,
        /*methodId=*/ 0x800296u,
        /*paramSize=*/ sizeof(NV0080_CTRL_GPU_SET_VGPU_VF_BAR1_SIZE_PARAMS),
        /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
        /*func=*/ "deviceCtrlCmdGpuSetVgpuVfBar1Size"
#endif
    },
    { /* [9] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1u)
        /*pFunc=*/ (void (*)(void)) NULL,
#else
        /*pFunc=*/ (void (*)(void)) deviceCtrlCmdOsUnixVTSwitch_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1u)
        /*flags=*/ 0x1u,
        /*accessRight=*/0x0u,
        /*methodId=*/ 0x801e01u,
        /*paramSize=*/ sizeof(NV0080_CTRL_OS_UNIX_VT_SWITCH_PARAMS),
        /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
        /*func=*/ "deviceCtrlCmdOsUnixVTSwitch"
#endif
    },
    { /* [10] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1u)
        /*pFunc=*/ (void (*)(void)) NULL,
#else
        /*pFunc=*/ (void (*)(void)) deviceCtrlCmdOsUnixVTGetFBInfo_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1u)
        /*flags=*/ 0x1u,
        /*accessRight=*/0x0u,
        /*methodId=*/ 0x801e02u,
        /*paramSize=*/ sizeof(NV0080_CTRL_OS_UNIX_VT_GET_FB_INFO_PARAMS),
        /*pClassInfo=*/ &(__nvoc_class_def_Device.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
        /*func=*/ "deviceCtrlCmdOsUnixVTGetFBInfo"
#endif
    },

};
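/*
 * Editorial note on the export table: methodId is the NV0080_CTRL command
 * identifier a client passes through the control path, flags encode
 * privilege/visibility requirements, and paramSize gives the expected size
 * of the caller-supplied parameter struct for that command.
 */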
const struct NVOC_EXPORT_INFO __nvoc_export_info_Device =
{
    /*numEntries=*/ 11,
    /*pExportEntries=*/ __nvoc_exported_method_def_Device
};
void __nvoc_dtor_GpuResource(GpuResource*);
|
||||
void __nvoc_dtor_Device(Device *pThis) {
|
||||
__nvoc_deviceDestruct(pThis);
|
||||
__nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource);
|
||||
PORT_UNREFERENCED_VARIABLE(pThis);
|
||||
}
|
||||
|
||||
void __nvoc_init_dataField_Device(Device *pThis) {
|
||||
PORT_UNREFERENCED_VARIABLE(pThis);
|
||||
}
|
||||
|
||||
NV_STATUS __nvoc_ctor_GpuResource(GpuResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
NV_STATUS __nvoc_ctor_Device(Device *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
    NV_STATUS status = NV_OK;
    status = __nvoc_ctor_GpuResource(&pThis->__nvoc_base_GpuResource, arg_pCallContext, arg_pParams);
    if (status != NV_OK) goto __nvoc_ctor_Device_fail_GpuResource;
    __nvoc_init_dataField_Device(pThis);

    status = __nvoc_deviceConstruct(pThis, arg_pCallContext, arg_pParams);
    if (status != NV_OK) goto __nvoc_ctor_Device_fail__init;
    goto __nvoc_ctor_Device_exit; // Success

__nvoc_ctor_Device_fail__init:
    __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource);
__nvoc_ctor_Device_fail_GpuResource:
__nvoc_ctor_Device_exit:

    return status;
}

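/* Function-table setup, part 1: methods Device implements itself are
 * bound to their _IMPL functions, guarded by the same export-disable
 * flags used in the exported-method table above. */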
static void __nvoc_init_funcTable_Device_1(Device *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);

    pThis->__deviceControl__ = &deviceControl_IMPL;
    pThis->__deviceInternalControlForward__ = &deviceInternalControlForward_IMPL;

#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x813u)
    pThis->__deviceCtrlCmdGpuGetClasslist__ = &deviceCtrlCmdGpuGetClasslist_IMPL;
#endif

#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x813u)
    pThis->__deviceCtrlCmdGpuGetClasslistV2__ = &deviceCtrlCmdGpuGetClasslistV2_IMPL;
#endif

#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x811u)
    pThis->__deviceCtrlCmdGpuGetNumSubdevices__ = &deviceCtrlCmdGpuGetNumSubdevices_IMPL;
#endif

#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x5u)
    pThis->__deviceCtrlCmdGpuModifyGpuSwStatePersistence__ = &deviceCtrlCmdGpuModifyGpuSwStatePersistence_IMPL;
#endif

#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
    pThis->__deviceCtrlCmdGpuQueryGpuSwStatePersistence__ = &deviceCtrlCmdGpuQueryGpuSwStatePersistence_IMPL;
#endif

#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x810u)
    pThis->__deviceCtrlCmdGpuGetVirtualizationMode__ = &deviceCtrlCmdGpuGetVirtualizationMode_IMPL;
#endif

#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x204u)
    pThis->__deviceCtrlCmdGpuSetVgpuVfBar1Size__ = &deviceCtrlCmdGpuSetVgpuVfBar1Size_IMPL;
#endif

#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x211u)
    pThis->__deviceCtrlCmdGpuGetBrandCaps__ = &deviceCtrlCmdGpuGetBrandCaps_IMPL;
#endif

#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x13u)
    pThis->__deviceCtrlCmdGpuGetFindSubDeviceHandle__ = &deviceCtrlCmdGpuGetFindSubDeviceHandle_IMPL;
#endif

#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1u)
    pThis->__deviceCtrlCmdOsUnixVTSwitch__ = &deviceCtrlCmdOsUnixVTSwitch_IMPL;
#endif

#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x1u)
    pThis->__deviceCtrlCmdOsUnixVTGetFBInfo__ = &deviceCtrlCmdOsUnixVTGetFBInfo_IMPL;
#endif

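    /* The remaining slots are thunks: two route base-class (GpuResource)
     * virtuals down into Device, and the rest forward Device virtuals up
     * to the inherited GpuResource/RmResource/RsResource implementations. */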
    pThis->__nvoc_base_GpuResource.__gpuresControl__ = &__nvoc_thunk_Device_gpuresControl;
    pThis->__nvoc_base_GpuResource.__gpuresInternalControlForward__ = &__nvoc_thunk_Device_gpuresInternalControlForward;

    pThis->__deviceShareCallback__ = &__nvoc_thunk_GpuResource_deviceShareCallback;
    pThis->__deviceUnmap__ = &__nvoc_thunk_GpuResource_deviceUnmap;
    pThis->__deviceGetMemInterMapParams__ = &__nvoc_thunk_RmResource_deviceGetMemInterMapParams;
    pThis->__deviceGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_deviceGetMemoryMappingDescriptor;
    pThis->__deviceGetMapAddrSpace__ = &__nvoc_thunk_GpuResource_deviceGetMapAddrSpace;
    pThis->__deviceGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_deviceGetInternalObjectHandle;
    pThis->__deviceControlFilter__ = &__nvoc_thunk_RsResource_deviceControlFilter;
    pThis->__deviceAddAdditionalDependants__ = &__nvoc_thunk_RsResource_deviceAddAdditionalDependants;
    pThis->__deviceGetRefCount__ = &__nvoc_thunk_RsResource_deviceGetRefCount;
    pThis->__deviceCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_deviceCheckMemInterUnmap;
    pThis->__deviceMapTo__ = &__nvoc_thunk_RsResource_deviceMapTo;
    pThis->__deviceControl_Prologue__ = &__nvoc_thunk_RmResource_deviceControl_Prologue;
    pThis->__deviceGetRegBaseOffsetAndSize__ = &__nvoc_thunk_GpuResource_deviceGetRegBaseOffsetAndSize;
    pThis->__deviceCanCopy__ = &__nvoc_thunk_RsResource_deviceCanCopy;
    pThis->__devicePreDestruct__ = &__nvoc_thunk_RsResource_devicePreDestruct;
    pThis->__deviceUnmapFrom__ = &__nvoc_thunk_RsResource_deviceUnmapFrom;
    pThis->__deviceControl_Epilogue__ = &__nvoc_thunk_RmResource_deviceControl_Epilogue;
    pThis->__deviceControlLookup__ = &__nvoc_thunk_RsResource_deviceControlLookup;
    pThis->__deviceMap__ = &__nvoc_thunk_GpuResource_deviceMap;
    pThis->__deviceAccessCallback__ = &__nvoc_thunk_RmResource_deviceAccessCallback;
}

void __nvoc_init_funcTable_Device(Device *pThis) {
    __nvoc_init_funcTable_Device_1(pThis);
}

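/* __nvoc_init_Device publishes a pbase pointer for itself and every
 * ancestor (Object, RsResource, RmResourceCommon, RmResource,
 * GpuResource); these are what staticCast resolves against. */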
void __nvoc_init_GpuResource(GpuResource*);
void __nvoc_init_Device(Device *pThis) {
    pThis->__nvoc_pbase_Device = pThis;
    pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object;
    pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource;
    pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon;
    pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource;
    pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_GpuResource;
    __nvoc_init_GpuResource(&pThis->__nvoc_base_GpuResource);
    __nvoc_init_funcTable_Device(pThis);
}

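/* Object creation: allocate and zero the object, set up RTTI, attach it
 * to its parent (unless the parent is HAL-spec-only), then run the init
 * and constructor chain; on failure the raw allocation is freed without
 * re-running destructors. */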
NV_STATUS __nvoc_objCreate_Device(Device **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
    NV_STATUS status;
    Object *pParentObj;
    Device *pThis;

    pThis = portMemAllocNonPaged(sizeof(Device));
    if (pThis == NULL) return NV_ERR_NO_MEMORY;

    portMemSet(pThis, 0, sizeof(Device));

    __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_Device);

    if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
    {
        pParentObj = dynamicCast(pParent, Object);
        objAddChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object);
    }
    else
    {
        pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL;
    }

    __nvoc_init_Device(pThis);
    status = __nvoc_ctor_Device(pThis, arg_pCallContext, arg_pParams);
    if (status != NV_OK) goto __nvoc_objCreate_Device_cleanup;

    *ppThis = pThis;
    return NV_OK;

__nvoc_objCreate_Device_cleanup:
    // do not call destructors here since the constructor already called them
    portMemFree(pThis);
    return status;
}

NV_STATUS __nvoc_objCreateDynamic_Device(Device **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
    NV_STATUS status;
    struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *);
    struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *);

    status = __nvoc_objCreate_Device(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams);

    return status;
}

@@ -1,329 +0,0 @@
#define NVOC_DISP_CAPABILITIES_H_PRIVATE_ACCESS_ALLOWED
#include "nvoc/runtime.h"
#include "nvoc/rtti.h"
#include "nvtypes.h"
#include "nvport/nvport.h"
#include "nvport/inline/util_valist.h"
#include "utils/nvassert.h"
#include "g_disp_capabilities_nvoc.h"

#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0x99db3e = 1;
#endif

extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispCapabilities;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource;

void __nvoc_init_DispCapabilities(DispCapabilities*);
void __nvoc_init_funcTable_DispCapabilities(DispCapabilities*);
NV_STATUS __nvoc_ctor_DispCapabilities(DispCapabilities*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams);
void __nvoc_init_dataField_DispCapabilities(DispCapabilities*);
void __nvoc_dtor_DispCapabilities(DispCapabilities*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_DispCapabilities;

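/* One RTTI record per class in the hierarchy; the recorded offset of
 * each embedded base within DispCapabilities is what converts object
 * pointers between classes at runtime. */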
static const struct NVOC_RTTI __nvoc_rtti_DispCapabilities_DispCapabilities = {
    /*pClassDef=*/ &__nvoc_class_def_DispCapabilities,
    /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_DispCapabilities,
    /*offset=*/ 0,
};

static const struct NVOC_RTTI __nvoc_rtti_DispCapabilities_Object = {
    /*pClassDef=*/ &__nvoc_class_def_Object,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(DispCapabilities, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object),
};

static const struct NVOC_RTTI __nvoc_rtti_DispCapabilities_RsResource = {
    /*pClassDef=*/ &__nvoc_class_def_RsResource,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(DispCapabilities, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource),
};

static const struct NVOC_RTTI __nvoc_rtti_DispCapabilities_RmResourceCommon = {
    /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(DispCapabilities, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon),
};

static const struct NVOC_RTTI __nvoc_rtti_DispCapabilities_RmResource = {
    /*pClassDef=*/ &__nvoc_class_def_RmResource,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(DispCapabilities, __nvoc_base_GpuResource.__nvoc_base_RmResource),
};

static const struct NVOC_RTTI __nvoc_rtti_DispCapabilities_GpuResource = {
    /*pClassDef=*/ &__nvoc_class_def_GpuResource,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(DispCapabilities, __nvoc_base_GpuResource),
};

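/* Cast info enumerates all six related classes for __nvoc_dynamicCast. */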
static const struct NVOC_CASTINFO __nvoc_castinfo_DispCapabilities = {
    /*numRelatives=*/ 6,
    /*relatives=*/ {
        &__nvoc_rtti_DispCapabilities_DispCapabilities,
        &__nvoc_rtti_DispCapabilities_GpuResource,
        &__nvoc_rtti_DispCapabilities_RmResource,
        &__nvoc_rtti_DispCapabilities_RmResourceCommon,
        &__nvoc_rtti_DispCapabilities_RsResource,
        &__nvoc_rtti_DispCapabilities_Object,
    },
};

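/* Class definition: instance size, class id, and the dynamic-create
 * entry point registered for DispCapabilities. */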
const struct NVOC_CLASS_DEF __nvoc_class_def_DispCapabilities =
{
    /*classInfo=*/ {
        /*size=*/ sizeof(DispCapabilities),
        /*classId=*/ classId(DispCapabilities),
        /*providerId=*/ &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
        /*name=*/ "DispCapabilities",
#endif
    },
    /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_DispCapabilities,
    /*pCastInfo=*/ &__nvoc_castinfo_DispCapabilities,
    /*pExportInfo=*/ &__nvoc_export_info_DispCapabilities
};

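/* Thunks adjust the object pointer by the RTTI offsets recorded above:
 * the first forwards a GpuResource virtual down to the DispCapabilities
 * override; the rest forward DispCapabilities virtuals up to inherited
 * base-class implementations. */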
static NV_STATUS __nvoc_thunk_DispCapabilities_gpuresGetRegBaseOffsetAndSize(struct GpuResource *pDispCapabilities, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) {
    return dispcapGetRegBaseOffsetAndSize((struct DispCapabilities *)(((unsigned char *)pDispCapabilities) - __nvoc_rtti_DispCapabilities_GpuResource.offset), pGpu, pOffset, pSize);
}

static NvBool __nvoc_thunk_GpuResource_dispcapShareCallback(struct DispCapabilities *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
    return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispCapabilities_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy);
}

static NV_STATUS __nvoc_thunk_GpuResource_dispcapControl(struct DispCapabilities *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return gpuresControl((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispCapabilities_GpuResource.offset), pCallContext, pParams);
}

static NV_STATUS __nvoc_thunk_GpuResource_dispcapUnmap(struct DispCapabilities *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) {
    return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispCapabilities_GpuResource.offset), pCallContext, pCpuMapping);
}

static NV_STATUS __nvoc_thunk_RmResource_dispcapGetMemInterMapParams(struct DispCapabilities *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
    return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DispCapabilities_RmResource.offset), pParams);
}

static NV_STATUS __nvoc_thunk_RmResource_dispcapGetMemoryMappingDescriptor(struct DispCapabilities *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
    return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DispCapabilities_RmResource.offset), ppMemDesc);
}

static NV_STATUS __nvoc_thunk_GpuResource_dispcapGetMapAddrSpace(struct DispCapabilities *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) {
    return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispCapabilities_GpuResource.offset), pCallContext, mapFlags, pAddrSpace);
}

static NvHandle __nvoc_thunk_GpuResource_dispcapGetInternalObjectHandle(struct DispCapabilities *pGpuResource) {
    return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispCapabilities_GpuResource.offset));
}

static NV_STATUS __nvoc_thunk_RsResource_dispcapControlFilter(struct DispCapabilities *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispCapabilities_RsResource.offset), pCallContext, pParams);
}

static void __nvoc_thunk_RsResource_dispcapAddAdditionalDependants(struct RsClient *pClient, struct DispCapabilities *pResource, RsResourceRef *pReference) {
    resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispCapabilities_RsResource.offset), pReference);
}

static NvU32 __nvoc_thunk_RsResource_dispcapGetRefCount(struct DispCapabilities *pResource) {
    return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispCapabilities_RsResource.offset));
}

static NV_STATUS __nvoc_thunk_RmResource_dispcapCheckMemInterUnmap(struct DispCapabilities *pRmResource, NvBool bSubdeviceHandleProvided) {
    return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DispCapabilities_RmResource.offset), bSubdeviceHandleProvided);
}

static NV_STATUS __nvoc_thunk_RsResource_dispcapMapTo(struct DispCapabilities *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
    return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispCapabilities_RsResource.offset), pParams);
}

static NV_STATUS __nvoc_thunk_RmResource_dispcapControl_Prologue(struct DispCapabilities *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispCapabilities_RmResource.offset), pCallContext, pParams);
}

static NvBool __nvoc_thunk_RsResource_dispcapCanCopy(struct DispCapabilities *pResource) {
    return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispCapabilities_RsResource.offset));
}

static NV_STATUS __nvoc_thunk_GpuResource_dispcapInternalControlForward(struct DispCapabilities *pGpuResource, NvU32 command, void *pParams, NvU32 size) {
    return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispCapabilities_GpuResource.offset), command, pParams, size);
}

static void __nvoc_thunk_RsResource_dispcapPreDestruct(struct DispCapabilities *pResource) {
    resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispCapabilities_RsResource.offset));
}

static NV_STATUS __nvoc_thunk_RsResource_dispcapUnmapFrom(struct DispCapabilities *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
    return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispCapabilities_RsResource.offset), pParams);
}

static void __nvoc_thunk_RmResource_dispcapControl_Epilogue(struct DispCapabilities *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispCapabilities_RmResource.offset), pCallContext, pParams);
}

static NV_STATUS __nvoc_thunk_RsResource_dispcapControlLookup(struct DispCapabilities *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) {
    return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispCapabilities_RsResource.offset), pParams, ppEntry);
}

static NV_STATUS __nvoc_thunk_GpuResource_dispcapMap(struct DispCapabilities *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) {
    return gpuresMap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispCapabilities_GpuResource.offset), pCallContext, pParams, pCpuMapping);
}

static NvBool __nvoc_thunk_RmResource_dispcapAccessCallback(struct DispCapabilities *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
    return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispCapabilities_RmResource.offset), pInvokingClient, pAllocParams, accessRight);
}

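/* DispCapabilities exports no control methods of its own. */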
const struct NVOC_EXPORT_INFO __nvoc_export_info_DispCapabilities =
{
    /*numEntries=*/ 0,
    /*pExportEntries=*/ 0
};

void __nvoc_dtor_GpuResource(GpuResource*);
void __nvoc_dtor_DispCapabilities(DispCapabilities *pThis) {
    __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource);
    PORT_UNREFERENCED_VARIABLE(pThis);
}

void __nvoc_init_dataField_DispCapabilities(DispCapabilities *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);
}

NV_STATUS __nvoc_ctor_GpuResource(GpuResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
NV_STATUS __nvoc_ctor_DispCapabilities(DispCapabilities *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
    NV_STATUS status = NV_OK;
    status = __nvoc_ctor_GpuResource(&pThis->__nvoc_base_GpuResource, arg_pCallContext, arg_pParams);
    if (status != NV_OK) goto __nvoc_ctor_DispCapabilities_fail_GpuResource;
    __nvoc_init_dataField_DispCapabilities(pThis);

    status = __nvoc_dispcapConstruct(pThis, arg_pCallContext, arg_pParams);
    if (status != NV_OK) goto __nvoc_ctor_DispCapabilities_fail__init;
    goto __nvoc_ctor_DispCapabilities_exit; // Success

__nvoc_ctor_DispCapabilities_fail__init:
    __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource);
__nvoc_ctor_DispCapabilities_fail_GpuResource:
__nvoc_ctor_DispCapabilities_exit:

    return status;
}

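/* Only dispcapGetRegBaseOffsetAndSize is overridden locally; the base
 * GpuResource slot is repointed at the thunk so base-class callers reach
 * the override, and every other slot falls back to a base-class thunk. */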
static void __nvoc_init_funcTable_DispCapabilities_1(DispCapabilities *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);

    pThis->__dispcapGetRegBaseOffsetAndSize__ = &dispcapGetRegBaseOffsetAndSize_IMPL;
    pThis->__nvoc_base_GpuResource.__gpuresGetRegBaseOffsetAndSize__ = &__nvoc_thunk_DispCapabilities_gpuresGetRegBaseOffsetAndSize;

    pThis->__dispcapShareCallback__ = &__nvoc_thunk_GpuResource_dispcapShareCallback;
    pThis->__dispcapControl__ = &__nvoc_thunk_GpuResource_dispcapControl;
    pThis->__dispcapUnmap__ = &__nvoc_thunk_GpuResource_dispcapUnmap;
    pThis->__dispcapGetMemInterMapParams__ = &__nvoc_thunk_RmResource_dispcapGetMemInterMapParams;
    pThis->__dispcapGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_dispcapGetMemoryMappingDescriptor;
    pThis->__dispcapGetMapAddrSpace__ = &__nvoc_thunk_GpuResource_dispcapGetMapAddrSpace;
    pThis->__dispcapGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_dispcapGetInternalObjectHandle;
    pThis->__dispcapControlFilter__ = &__nvoc_thunk_RsResource_dispcapControlFilter;
    pThis->__dispcapAddAdditionalDependants__ = &__nvoc_thunk_RsResource_dispcapAddAdditionalDependants;
    pThis->__dispcapGetRefCount__ = &__nvoc_thunk_RsResource_dispcapGetRefCount;
    pThis->__dispcapCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_dispcapCheckMemInterUnmap;
    pThis->__dispcapMapTo__ = &__nvoc_thunk_RsResource_dispcapMapTo;
    pThis->__dispcapControl_Prologue__ = &__nvoc_thunk_RmResource_dispcapControl_Prologue;
    pThis->__dispcapCanCopy__ = &__nvoc_thunk_RsResource_dispcapCanCopy;
    pThis->__dispcapInternalControlForward__ = &__nvoc_thunk_GpuResource_dispcapInternalControlForward;
    pThis->__dispcapPreDestruct__ = &__nvoc_thunk_RsResource_dispcapPreDestruct;
    pThis->__dispcapUnmapFrom__ = &__nvoc_thunk_RsResource_dispcapUnmapFrom;
    pThis->__dispcapControl_Epilogue__ = &__nvoc_thunk_RmResource_dispcapControl_Epilogue;
    pThis->__dispcapControlLookup__ = &__nvoc_thunk_RsResource_dispcapControlLookup;
    pThis->__dispcapMap__ = &__nvoc_thunk_GpuResource_dispcapMap;
    pThis->__dispcapAccessCallback__ = &__nvoc_thunk_RmResource_dispcapAccessCallback;
}

void __nvoc_init_funcTable_DispCapabilities(DispCapabilities *pThis) {
    __nvoc_init_funcTable_DispCapabilities_1(pThis);
}

void __nvoc_init_GpuResource(GpuResource*);
void __nvoc_init_DispCapabilities(DispCapabilities *pThis) {
    pThis->__nvoc_pbase_DispCapabilities = pThis;
    pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object;
    pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource;
    pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon;
    pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource;
    pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_GpuResource;
    __nvoc_init_GpuResource(&pThis->__nvoc_base_GpuResource);
    __nvoc_init_funcTable_DispCapabilities(pThis);
}

NV_STATUS __nvoc_objCreate_DispCapabilities(DispCapabilities **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
    NV_STATUS status;
    Object *pParentObj;
    DispCapabilities *pThis;

    pThis = portMemAllocNonPaged(sizeof(DispCapabilities));
    if (pThis == NULL) return NV_ERR_NO_MEMORY;

    portMemSet(pThis, 0, sizeof(DispCapabilities));

    __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_DispCapabilities);

    if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
    {
        pParentObj = dynamicCast(pParent, Object);
        objAddChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object);
    }
    else
    {
        pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL;
    }

    __nvoc_init_DispCapabilities(pThis);
    status = __nvoc_ctor_DispCapabilities(pThis, arg_pCallContext, arg_pParams);
    if (status != NV_OK) goto __nvoc_objCreate_DispCapabilities_cleanup;

    *ppThis = pThis;
    return NV_OK;

__nvoc_objCreate_DispCapabilities_cleanup:
    // do not call destructors here since the constructor already called them
    portMemFree(pThis);
    return status;
}

NV_STATUS __nvoc_objCreateDynamic_DispCapabilities(DispCapabilities **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
    NV_STATUS status;
    struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *);
    struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *);

    status = __nvoc_objCreate_DispCapabilities(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams);

    return status;
}

@@ -1,239 +0,0 @@
#ifndef _G_DISP_CAPABILITIES_NVOC_H_
#define _G_DISP_CAPABILITIES_NVOC_H_
#include "nvoc/runtime.h"

#ifdef __cplusplus
extern "C" {
#endif

/*
 * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/******************************************************************************
 *
 *   Description:
 *       This file contains functions managing the DispCapabilities class.
 *
 ******************************************************************************/

#include "g_disp_capabilities_nvoc.h"

#ifndef DISP_CAPABILITIES_H
#define DISP_CAPABILITIES_H

#include "gpu/gpu_resource.h"

/*!
 * RM internal class representing NVXXXX_DISP_CAPABILITIES
 */
#ifdef NVOC_DISP_CAPABILITIES_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
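/* Per-instance layout: RTTI pointer, embedded GpuResource base, pbase
 * shortcuts for each ancestor, one function pointer per virtual method,
 * and the register window (ControlOffset/ControlLength) the class
 * reports. */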
struct DispCapabilities {
    const struct NVOC_RTTI *__nvoc_rtti;
    struct GpuResource __nvoc_base_GpuResource;
    struct Object *__nvoc_pbase_Object;
    struct RsResource *__nvoc_pbase_RsResource;
    struct RmResourceCommon *__nvoc_pbase_RmResourceCommon;
    struct RmResource *__nvoc_pbase_RmResource;
    struct GpuResource *__nvoc_pbase_GpuResource;
    struct DispCapabilities *__nvoc_pbase_DispCapabilities;
    NV_STATUS (*__dispcapGetRegBaseOffsetAndSize__)(struct DispCapabilities *, struct OBJGPU *, NvU32 *, NvU32 *);
    NvBool (*__dispcapShareCallback__)(struct DispCapabilities *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *);
    NV_STATUS (*__dispcapControl__)(struct DispCapabilities *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
    NV_STATUS (*__dispcapUnmap__)(struct DispCapabilities *, struct CALL_CONTEXT *, struct RsCpuMapping *);
    NV_STATUS (*__dispcapGetMemInterMapParams__)(struct DispCapabilities *, RMRES_MEM_INTER_MAP_PARAMS *);
    NV_STATUS (*__dispcapGetMemoryMappingDescriptor__)(struct DispCapabilities *, struct MEMORY_DESCRIPTOR **);
    NV_STATUS (*__dispcapGetMapAddrSpace__)(struct DispCapabilities *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *);
    NvHandle (*__dispcapGetInternalObjectHandle__)(struct DispCapabilities *);
    NV_STATUS (*__dispcapControlFilter__)(struct DispCapabilities *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
    void (*__dispcapAddAdditionalDependants__)(struct RsClient *, struct DispCapabilities *, RsResourceRef *);
    NvU32 (*__dispcapGetRefCount__)(struct DispCapabilities *);
    NV_STATUS (*__dispcapCheckMemInterUnmap__)(struct DispCapabilities *, NvBool);
    NV_STATUS (*__dispcapMapTo__)(struct DispCapabilities *, RS_RES_MAP_TO_PARAMS *);
    NV_STATUS (*__dispcapControl_Prologue__)(struct DispCapabilities *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
    NvBool (*__dispcapCanCopy__)(struct DispCapabilities *);
    NV_STATUS (*__dispcapInternalControlForward__)(struct DispCapabilities *, NvU32, void *, NvU32);
    void (*__dispcapPreDestruct__)(struct DispCapabilities *);
    NV_STATUS (*__dispcapUnmapFrom__)(struct DispCapabilities *, RS_RES_UNMAP_FROM_PARAMS *);
    void (*__dispcapControl_Epilogue__)(struct DispCapabilities *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
    NV_STATUS (*__dispcapControlLookup__)(struct DispCapabilities *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **);
    NV_STATUS (*__dispcapMap__)(struct DispCapabilities *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *);
    NvBool (*__dispcapAccessCallback__)(struct DispCapabilities *, struct RsClient *, void *, RsAccessRight);
    NvU32 ControlOffset;
    NvU32 ControlLength;
};

#ifndef __NVOC_CLASS_DispCapabilities_TYPEDEF__
#define __NVOC_CLASS_DispCapabilities_TYPEDEF__
typedef struct DispCapabilities DispCapabilities;
#endif /* __NVOC_CLASS_DispCapabilities_TYPEDEF__ */

#ifndef __nvoc_class_id_DispCapabilities
#define __nvoc_class_id_DispCapabilities 0x99db3e
#endif /* __nvoc_class_id_DispCapabilities */

extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispCapabilities;

#define __staticCast_DispCapabilities(pThis) \
    ((pThis)->__nvoc_pbase_DispCapabilities)

#ifdef __nvoc_disp_capabilities_h_disabled
#define __dynamicCast_DispCapabilities(pThis) ((DispCapabilities*)NULL)
#else //__nvoc_disp_capabilities_h_disabled
#define __dynamicCast_DispCapabilities(pThis) \
    ((DispCapabilities*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(DispCapabilities)))
#endif //__nvoc_disp_capabilities_h_disabled

NV_STATUS __nvoc_objCreateDynamic_DispCapabilities(DispCapabilities**, Dynamic*, NvU32, va_list);

NV_STATUS __nvoc_objCreate_DispCapabilities(DispCapabilities**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams);
#define __objCreate_DispCapabilities(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \
    __nvoc_objCreate_DispCapabilities((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams)

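/* The public names are macros onto the _DISPATCH inlines below, which
 * call through the per-object function pointers set up at init time, so
 * behavior can be overridden per class without touching callers. */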
#define dispcapGetRegBaseOffsetAndSize(pDispCapabilities, pGpu, pOffset, pSize) dispcapGetRegBaseOffsetAndSize_DISPATCH(pDispCapabilities, pGpu, pOffset, pSize)
#define dispcapShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) dispcapShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy)
#define dispcapControl(pGpuResource, pCallContext, pParams) dispcapControl_DISPATCH(pGpuResource, pCallContext, pParams)
#define dispcapUnmap(pGpuResource, pCallContext, pCpuMapping) dispcapUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping)
#define dispcapGetMemInterMapParams(pRmResource, pParams) dispcapGetMemInterMapParams_DISPATCH(pRmResource, pParams)
#define dispcapGetMemoryMappingDescriptor(pRmResource, ppMemDesc) dispcapGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc)
#define dispcapGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) dispcapGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace)
#define dispcapGetInternalObjectHandle(pGpuResource) dispcapGetInternalObjectHandle_DISPATCH(pGpuResource)
#define dispcapControlFilter(pResource, pCallContext, pParams) dispcapControlFilter_DISPATCH(pResource, pCallContext, pParams)
#define dispcapAddAdditionalDependants(pClient, pResource, pReference) dispcapAddAdditionalDependants_DISPATCH(pClient, pResource, pReference)
#define dispcapGetRefCount(pResource) dispcapGetRefCount_DISPATCH(pResource)
#define dispcapCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) dispcapCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided)
#define dispcapMapTo(pResource, pParams) dispcapMapTo_DISPATCH(pResource, pParams)
#define dispcapControl_Prologue(pResource, pCallContext, pParams) dispcapControl_Prologue_DISPATCH(pResource, pCallContext, pParams)
#define dispcapCanCopy(pResource) dispcapCanCopy_DISPATCH(pResource)
#define dispcapInternalControlForward(pGpuResource, command, pParams, size) dispcapInternalControlForward_DISPATCH(pGpuResource, command, pParams, size)
#define dispcapPreDestruct(pResource) dispcapPreDestruct_DISPATCH(pResource)
#define dispcapUnmapFrom(pResource, pParams) dispcapUnmapFrom_DISPATCH(pResource, pParams)
#define dispcapControl_Epilogue(pResource, pCallContext, pParams) dispcapControl_Epilogue_DISPATCH(pResource, pCallContext, pParams)
#define dispcapControlLookup(pResource, pParams, ppEntry) dispcapControlLookup_DISPATCH(pResource, pParams, ppEntry)
#define dispcapMap(pGpuResource, pCallContext, pParams, pCpuMapping) dispcapMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping)
#define dispcapAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) dispcapAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight)
NV_STATUS dispcapGetRegBaseOffsetAndSize_IMPL(struct DispCapabilities *pDispCapabilities, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize);

static inline NV_STATUS dispcapGetRegBaseOffsetAndSize_DISPATCH(struct DispCapabilities *pDispCapabilities, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) {
    return pDispCapabilities->__dispcapGetRegBaseOffsetAndSize__(pDispCapabilities, pGpu, pOffset, pSize);
}

static inline NvBool dispcapShareCallback_DISPATCH(struct DispCapabilities *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
    return pGpuResource->__dispcapShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy);
}

static inline NV_STATUS dispcapControl_DISPATCH(struct DispCapabilities *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return pGpuResource->__dispcapControl__(pGpuResource, pCallContext, pParams);
}

static inline NV_STATUS dispcapUnmap_DISPATCH(struct DispCapabilities *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) {
    return pGpuResource->__dispcapUnmap__(pGpuResource, pCallContext, pCpuMapping);
}

static inline NV_STATUS dispcapGetMemInterMapParams_DISPATCH(struct DispCapabilities *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
    return pRmResource->__dispcapGetMemInterMapParams__(pRmResource, pParams);
}

static inline NV_STATUS dispcapGetMemoryMappingDescriptor_DISPATCH(struct DispCapabilities *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
    return pRmResource->__dispcapGetMemoryMappingDescriptor__(pRmResource, ppMemDesc);
}

static inline NV_STATUS dispcapGetMapAddrSpace_DISPATCH(struct DispCapabilities *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) {
    return pGpuResource->__dispcapGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace);
}

static inline NvHandle dispcapGetInternalObjectHandle_DISPATCH(struct DispCapabilities *pGpuResource) {
    return pGpuResource->__dispcapGetInternalObjectHandle__(pGpuResource);
}

static inline NV_STATUS dispcapControlFilter_DISPATCH(struct DispCapabilities *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return pResource->__dispcapControlFilter__(pResource, pCallContext, pParams);
}

static inline void dispcapAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct DispCapabilities *pResource, RsResourceRef *pReference) {
    pResource->__dispcapAddAdditionalDependants__(pClient, pResource, pReference);
}

static inline NvU32 dispcapGetRefCount_DISPATCH(struct DispCapabilities *pResource) {
    return pResource->__dispcapGetRefCount__(pResource);
}

static inline NV_STATUS dispcapCheckMemInterUnmap_DISPATCH(struct DispCapabilities *pRmResource, NvBool bSubdeviceHandleProvided) {
    return pRmResource->__dispcapCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided);
}

static inline NV_STATUS dispcapMapTo_DISPATCH(struct DispCapabilities *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
    return pResource->__dispcapMapTo__(pResource, pParams);
}

static inline NV_STATUS dispcapControl_Prologue_DISPATCH(struct DispCapabilities *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return pResource->__dispcapControl_Prologue__(pResource, pCallContext, pParams);
}

static inline NvBool dispcapCanCopy_DISPATCH(struct DispCapabilities *pResource) {
    return pResource->__dispcapCanCopy__(pResource);
}

static inline NV_STATUS dispcapInternalControlForward_DISPATCH(struct DispCapabilities *pGpuResource, NvU32 command, void *pParams, NvU32 size) {
    return pGpuResource->__dispcapInternalControlForward__(pGpuResource, command, pParams, size);
}

static inline void dispcapPreDestruct_DISPATCH(struct DispCapabilities *pResource) {
    pResource->__dispcapPreDestruct__(pResource);
}

static inline NV_STATUS dispcapUnmapFrom_DISPATCH(struct DispCapabilities *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
    return pResource->__dispcapUnmapFrom__(pResource, pParams);
}

static inline void dispcapControl_Epilogue_DISPATCH(struct DispCapabilities *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    pResource->__dispcapControl_Epilogue__(pResource, pCallContext, pParams);
}

static inline NV_STATUS dispcapControlLookup_DISPATCH(struct DispCapabilities *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) {
    return pResource->__dispcapControlLookup__(pResource, pParams, ppEntry);
}

static inline NV_STATUS dispcapMap_DISPATCH(struct DispCapabilities *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) {
    return pGpuResource->__dispcapMap__(pGpuResource, pCallContext, pParams, pCpuMapping);
}

static inline NvBool dispcapAccessCallback_DISPATCH(struct DispCapabilities *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
    return pResource->__dispcapAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight);
}

NV_STATUS dispcapConstruct_IMPL(struct DispCapabilities *arg_pDispCapabilities, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams);
#define __nvoc_dispcapConstruct(arg_pDispCapabilities, arg_pCallContext, arg_pParams) dispcapConstruct_IMPL(arg_pDispCapabilities, arg_pCallContext, arg_pParams)
#undef PRIVATE_FIELD

#endif // DISP_CAPABILITIES_H

#ifdef __cplusplus
} // extern "C"
#endif
#endif // _G_DISP_CAPABILITIES_NVOC_H_

File diff suppressed because it is too large
@@ -1,776 +0,0 @@
#ifndef _G_DISP_CHANNEL_NVOC_H_
#define _G_DISP_CHANNEL_NVOC_H_
#include "nvoc/runtime.h"

#ifdef __cplusplus
extern "C" {
#endif

/*
 * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/******************************************************************************
 *
 *   Description:
 *       This file contains functions managing DispChannel and its derived classes.
 *
 ******************************************************************************/

#include "g_disp_channel_nvoc.h"

#ifndef DISP_CHANNEL_H
#define DISP_CHANNEL_H

#include "gpu/gpu_resource.h"
#include "rmapi/event.h"

struct ContextDma;

#ifndef __NVOC_CLASS_ContextDma_TYPEDEF__
#define __NVOC_CLASS_ContextDma_TYPEDEF__
typedef struct ContextDma ContextDma;
#endif /* __NVOC_CLASS_ContextDma_TYPEDEF__ */

#ifndef __nvoc_class_id_ContextDma
#define __nvoc_class_id_ContextDma 0x88441b
#endif /* __nvoc_class_id_ContextDma */

struct DispObject;

#ifndef __NVOC_CLASS_DispObject_TYPEDEF__
#define __NVOC_CLASS_DispObject_TYPEDEF__
typedef struct DispObject DispObject;
#endif /* __NVOC_CLASS_DispObject_TYPEDEF__ */

#ifndef __nvoc_class_id_DispObject
#define __nvoc_class_id_DispObject 0x999839
#endif /* __nvoc_class_id_DispObject */

/*!
 * Base class for display channels
 */
#ifdef NVOC_DISP_CHANNEL_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
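/* DispChannel inherits from both GpuResource and Notifier, so it carries
 * pbase pointers for the notification branch (INotifier/Notifier) as well
 * as the resource branch, plus channel state: DispClass, InstanceNumber,
 * the mapped control region (pControl/pPriv, ControlOffset/ControlLength),
 * and the bIsDma flag matching the allocator's arg_isDma parameter. */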
struct DispChannel {
    const struct NVOC_RTTI *__nvoc_rtti;
    struct GpuResource __nvoc_base_GpuResource;
    struct Notifier __nvoc_base_Notifier;
    struct Object *__nvoc_pbase_Object;
    struct RsResource *__nvoc_pbase_RsResource;
    struct RmResourceCommon *__nvoc_pbase_RmResourceCommon;
    struct RmResource *__nvoc_pbase_RmResource;
    struct GpuResource *__nvoc_pbase_GpuResource;
    struct INotifier *__nvoc_pbase_INotifier;
    struct Notifier *__nvoc_pbase_Notifier;
    struct DispChannel *__nvoc_pbase_DispChannel;
    NV_STATUS (*__dispchnGetRegBaseOffsetAndSize__)(struct DispChannel *, struct OBJGPU *, NvU32 *, NvU32 *);
    NvBool (*__dispchnShareCallback__)(struct DispChannel *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *);
    NV_STATUS (*__dispchnMapTo__)(struct DispChannel *, RS_RES_MAP_TO_PARAMS *);
    NV_STATUS (*__dispchnGetOrAllocNotifShare__)(struct DispChannel *, NvHandle, NvHandle, struct NotifShare **);
    NV_STATUS (*__dispchnCheckMemInterUnmap__)(struct DispChannel *, NvBool);
    NV_STATUS (*__dispchnGetMapAddrSpace__)(struct DispChannel *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *);
    void (*__dispchnSetNotificationShare__)(struct DispChannel *, struct NotifShare *);
    NvU32 (*__dispchnGetRefCount__)(struct DispChannel *);
    void (*__dispchnAddAdditionalDependants__)(struct RsClient *, struct DispChannel *, RsResourceRef *);
    NV_STATUS (*__dispchnControl_Prologue__)(struct DispChannel *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
    NV_STATUS (*__dispchnInternalControlForward__)(struct DispChannel *, NvU32, void *, NvU32);
    NV_STATUS (*__dispchnUnmapFrom__)(struct DispChannel *, RS_RES_UNMAP_FROM_PARAMS *);
    void (*__dispchnControl_Epilogue__)(struct DispChannel *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
    NV_STATUS (*__dispchnControlLookup__)(struct DispChannel *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **);
    NvHandle (*__dispchnGetInternalObjectHandle__)(struct DispChannel *);
    NV_STATUS (*__dispchnControl__)(struct DispChannel *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
    NV_STATUS (*__dispchnUnmap__)(struct DispChannel *, struct CALL_CONTEXT *, struct RsCpuMapping *);
    NV_STATUS (*__dispchnGetMemInterMapParams__)(struct DispChannel *, RMRES_MEM_INTER_MAP_PARAMS *);
    NV_STATUS (*__dispchnGetMemoryMappingDescriptor__)(struct DispChannel *, struct MEMORY_DESCRIPTOR **);
    NV_STATUS (*__dispchnControlFilter__)(struct DispChannel *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
    NV_STATUS (*__dispchnUnregisterEvent__)(struct DispChannel *, NvHandle, NvHandle, NvHandle, NvHandle);
    NvBool (*__dispchnCanCopy__)(struct DispChannel *);
    void (*__dispchnPreDestruct__)(struct DispChannel *);
    PEVENTNOTIFICATION *(*__dispchnGetNotificationListPtr__)(struct DispChannel *);
    struct NotifShare *(*__dispchnGetNotificationShare__)(struct DispChannel *);
    NV_STATUS (*__dispchnMap__)(struct DispChannel *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *);
    NvBool (*__dispchnAccessCallback__)(struct DispChannel *, struct RsClient *, void *, RsAccessRight);
    struct DispObject *pDispObject;
    NvU32 DispClass;
    NvU32 InstanceNumber;
    NvP64 pControl;
    NvP64 pPriv;
    NvU32 ControlOffset;
    NvU32 ControlLength;
    NvBool bIsDma;
};

#ifndef __NVOC_CLASS_DispChannel_TYPEDEF__
#define __NVOC_CLASS_DispChannel_TYPEDEF__
typedef struct DispChannel DispChannel;
#endif /* __NVOC_CLASS_DispChannel_TYPEDEF__ */

#ifndef __nvoc_class_id_DispChannel
#define __nvoc_class_id_DispChannel 0xbd2ff3
#endif /* __nvoc_class_id_DispChannel */

extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispChannel;

#define __staticCast_DispChannel(pThis) \
    ((pThis)->__nvoc_pbase_DispChannel)

#ifdef __nvoc_disp_channel_h_disabled
#define __dynamicCast_DispChannel(pThis) ((DispChannel*)NULL)
#else //__nvoc_disp_channel_h_disabled
#define __dynamicCast_DispChannel(pThis) \
    ((DispChannel*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(DispChannel)))
#endif //__nvoc_disp_channel_h_disabled

NV_STATUS __nvoc_objCreateDynamic_DispChannel(DispChannel**, Dynamic*, NvU32, va_list);

NV_STATUS __nvoc_objCreate_DispChannel(DispChannel**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams, NvU32 arg_isDma);
#define __objCreate_DispChannel(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams, arg_isDma) \
    __nvoc_objCreate_DispChannel((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams, arg_isDma)

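/* Same dispatch-macro pattern as DispCapabilities above; DispChannel
 * additionally exposes Notifier-derived entry points (notification share
 * accessors and event registration hooks). */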
#define dispchnGetRegBaseOffsetAndSize(pDispChannel, pGpu, pOffset, pSize) dispchnGetRegBaseOffsetAndSize_DISPATCH(pDispChannel, pGpu, pOffset, pSize)
#define dispchnShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) dispchnShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy)
#define dispchnMapTo(pResource, pParams) dispchnMapTo_DISPATCH(pResource, pParams)
#define dispchnGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) dispchnGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare)
#define dispchnCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) dispchnCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided)
#define dispchnGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) dispchnGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace)
#define dispchnSetNotificationShare(pNotifier, pNotifShare) dispchnSetNotificationShare_DISPATCH(pNotifier, pNotifShare)
#define dispchnGetRefCount(pResource) dispchnGetRefCount_DISPATCH(pResource)
#define dispchnAddAdditionalDependants(pClient, pResource, pReference) dispchnAddAdditionalDependants_DISPATCH(pClient, pResource, pReference)
#define dispchnControl_Prologue(pResource, pCallContext, pParams) dispchnControl_Prologue_DISPATCH(pResource, pCallContext, pParams)
#define dispchnInternalControlForward(pGpuResource, command, pParams, size) dispchnInternalControlForward_DISPATCH(pGpuResource, command, pParams, size)
#define dispchnUnmapFrom(pResource, pParams) dispchnUnmapFrom_DISPATCH(pResource, pParams)
#define dispchnControl_Epilogue(pResource, pCallContext, pParams) dispchnControl_Epilogue_DISPATCH(pResource, pCallContext, pParams)
#define dispchnControlLookup(pResource, pParams, ppEntry) dispchnControlLookup_DISPATCH(pResource, pParams, ppEntry)
#define dispchnGetInternalObjectHandle(pGpuResource) dispchnGetInternalObjectHandle_DISPATCH(pGpuResource)
#define dispchnControl(pGpuResource, pCallContext, pParams) dispchnControl_DISPATCH(pGpuResource, pCallContext, pParams)
#define dispchnUnmap(pGpuResource, pCallContext, pCpuMapping) dispchnUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping)
#define dispchnGetMemInterMapParams(pRmResource, pParams) dispchnGetMemInterMapParams_DISPATCH(pRmResource, pParams)
#define dispchnGetMemoryMappingDescriptor(pRmResource, ppMemDesc) dispchnGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc)
#define dispchnControlFilter(pResource, pCallContext, pParams) dispchnControlFilter_DISPATCH(pResource, pCallContext, pParams)
#define dispchnUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) dispchnUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent)
#define dispchnCanCopy(pResource) dispchnCanCopy_DISPATCH(pResource)
#define dispchnPreDestruct(pResource) dispchnPreDestruct_DISPATCH(pResource)
#define dispchnGetNotificationListPtr(pNotifier) dispchnGetNotificationListPtr_DISPATCH(pNotifier)
#define dispchnGetNotificationShare(pNotifier) dispchnGetNotificationShare_DISPATCH(pNotifier)
#define dispchnMap(pGpuResource, pCallContext, pParams, pCpuMapping) dispchnMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping)
#define dispchnAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) dispchnAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight)
NV_STATUS dispchnGetRegBaseOffsetAndSize_IMPL(struct DispChannel *pDispChannel, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize);

static inline NV_STATUS dispchnGetRegBaseOffsetAndSize_DISPATCH(struct DispChannel *pDispChannel, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) {
    return pDispChannel->__dispchnGetRegBaseOffsetAndSize__(pDispChannel, pGpu, pOffset, pSize);
}

static inline NvBool dispchnShareCallback_DISPATCH(struct DispChannel *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
    return pGpuResource->__dispchnShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy);
}

static inline NV_STATUS dispchnMapTo_DISPATCH(struct DispChannel *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
    return pResource->__dispchnMapTo__(pResource, pParams);
}

static inline NV_STATUS dispchnGetOrAllocNotifShare_DISPATCH(struct DispChannel *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) {
    return pNotifier->__dispchnGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare);
}

static inline NV_STATUS dispchnCheckMemInterUnmap_DISPATCH(struct DispChannel *pRmResource, NvBool bSubdeviceHandleProvided) {
    return pRmResource->__dispchnCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided);
}

static inline NV_STATUS dispchnGetMapAddrSpace_DISPATCH(struct DispChannel *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) {
    return pGpuResource->__dispchnGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace);
}

static inline void dispchnSetNotificationShare_DISPATCH(struct DispChannel *pNotifier, struct NotifShare *pNotifShare) {
    pNotifier->__dispchnSetNotificationShare__(pNotifier, pNotifShare);
}

static inline NvU32 dispchnGetRefCount_DISPATCH(struct DispChannel *pResource) {
    return pResource->__dispchnGetRefCount__(pResource);
}

static inline void dispchnAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct DispChannel *pResource, RsResourceRef *pReference) {
    pResource->__dispchnAddAdditionalDependants__(pClient, pResource, pReference);
}

static inline NV_STATUS dispchnControl_Prologue_DISPATCH(struct DispChannel *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return pResource->__dispchnControl_Prologue__(pResource, pCallContext, pParams);
}

static inline NV_STATUS dispchnInternalControlForward_DISPATCH(struct DispChannel *pGpuResource, NvU32 command, void *pParams, NvU32 size) {
    return pGpuResource->__dispchnInternalControlForward__(pGpuResource, command, pParams, size);
}

static inline NV_STATUS dispchnUnmapFrom_DISPATCH(struct DispChannel *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
    return pResource->__dispchnUnmapFrom__(pResource, pParams);
}

static inline void dispchnControl_Epilogue_DISPATCH(struct DispChannel *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    pResource->__dispchnControl_Epilogue__(pResource, pCallContext, pParams);
}

static inline NV_STATUS dispchnControlLookup_DISPATCH(struct DispChannel *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) {
    return pResource->__dispchnControlLookup__(pResource, pParams, ppEntry);
}

static inline NvHandle dispchnGetInternalObjectHandle_DISPATCH(struct DispChannel *pGpuResource) {
    return pGpuResource->__dispchnGetInternalObjectHandle__(pGpuResource);
}

static inline NV_STATUS dispchnControl_DISPATCH(struct DispChannel *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return pGpuResource->__dispchnControl__(pGpuResource, pCallContext, pParams);
}

static inline NV_STATUS dispchnUnmap_DISPATCH(struct DispChannel *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) {
    return pGpuResource->__dispchnUnmap__(pGpuResource, pCallContext, pCpuMapping);
}

static inline NV_STATUS dispchnGetMemInterMapParams_DISPATCH(struct DispChannel *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
    return pRmResource->__dispchnGetMemInterMapParams__(pRmResource, pParams);
}

static inline NV_STATUS dispchnGetMemoryMappingDescriptor_DISPATCH(struct DispChannel *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
    return pRmResource->__dispchnGetMemoryMappingDescriptor__(pRmResource, ppMemDesc);
}

static inline NV_STATUS dispchnControlFilter_DISPATCH(struct DispChannel *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return pResource->__dispchnControlFilter__(pResource, pCallContext, pParams);
}

static inline NV_STATUS dispchnUnregisterEvent_DISPATCH(struct DispChannel *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) {
    return pNotifier->__dispchnUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent);
}

static inline NvBool dispchnCanCopy_DISPATCH(struct DispChannel *pResource) {
    return pResource->__dispchnCanCopy__(pResource);
}

static inline void dispchnPreDestruct_DISPATCH(struct DispChannel *pResource) {
    pResource->__dispchnPreDestruct__(pResource);
}

static inline PEVENTNOTIFICATION *dispchnGetNotificationListPtr_DISPATCH(struct DispChannel *pNotifier) {
    return pNotifier->__dispchnGetNotificationListPtr__(pNotifier);
}

static inline struct NotifShare *dispchnGetNotificationShare_DISPATCH(struct DispChannel *pNotifier) {
    return pNotifier->__dispchnGetNotificationShare__(pNotifier);
|
||||
}
|
||||
|
||||
static inline NV_STATUS dispchnMap_DISPATCH(struct DispChannel *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) {
|
||||
return pGpuResource->__dispchnMap__(pGpuResource, pCallContext, pParams, pCpuMapping);
|
||||
}
|
||||
|
||||
static inline NvBool dispchnAccessCallback_DISPATCH(struct DispChannel *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
|
||||
return pResource->__dispchnAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight);
|
||||
}
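/*
 * Editor's note, a minimal sketch for orientation (not part of the generated
 * header): the dispchn*_DISPATCH inlines above all follow the same NVOC
 * virtual-dispatch pattern. The public macro expands to a static inline
 * dispatcher, which calls through a per-object function pointer installed in
 * the class func table at object-init time. With a hypothetical Foo class:
 *
 *     typedef int NV_STATUS;
 *     struct Foo;
 *     struct Foo {
 *         NV_STATUS (*__fooBar__)(struct Foo *);   // vtable slot, filled at init
 *     };
 *     static inline NV_STATUS fooBar_DISPATCH(struct Foo *pFoo) {
 *         return pFoo->__fooBar__(pFoo);           // indirect call through the slot
 *     }
 *     #define fooBar(pFoo) fooBar_DISPATCH(pFoo)   // public entry point
 */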

NV_STATUS dispchnConstruct_IMPL(struct DispChannel *arg_pDispChannel, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams, NvU32 arg_isDma);

#define __nvoc_dispchnConstruct(arg_pDispChannel, arg_pCallContext, arg_pParams, arg_isDma) dispchnConstruct_IMPL(arg_pDispChannel, arg_pCallContext, arg_pParams, arg_isDma)

void dispchnDestruct_IMPL(struct DispChannel *pDispChannel);

#define __nvoc_dispchnDestruct(pDispChannel) dispchnDestruct_IMPL(pDispChannel)

void dispchnSetRegBaseOffsetAndSize_IMPL(struct DispChannel *pDispChannel, struct OBJGPU *pGpu);

#ifdef __nvoc_disp_channel_h_disabled
static inline void dispchnSetRegBaseOffsetAndSize(struct DispChannel *pDispChannel, struct OBJGPU *pGpu) {
    NV_ASSERT_FAILED_PRECOMP("DispChannel was disabled!");
}
#else //__nvoc_disp_channel_h_disabled
#define dispchnSetRegBaseOffsetAndSize(pDispChannel, pGpu) dispchnSetRegBaseOffsetAndSize_IMPL(pDispChannel, pGpu)
#endif //__nvoc_disp_channel_h_disabled

NV_STATUS dispchnGrabChannel_IMPL(struct DispChannel *pDispChannel, NvHandle hClient, NvHandle hParent, NvHandle hChannel, NvU32 hClass, void *pAllocParms);

#ifdef __nvoc_disp_channel_h_disabled
static inline NV_STATUS dispchnGrabChannel(struct DispChannel *pDispChannel, NvHandle hClient, NvHandle hParent, NvHandle hChannel, NvU32 hClass, void *pAllocParms) {
    NV_ASSERT_FAILED_PRECOMP("DispChannel was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_disp_channel_h_disabled
#define dispchnGrabChannel(pDispChannel, hClient, hParent, hChannel, hClass, pAllocParms) dispchnGrabChannel_IMPL(pDispChannel, hClient, hParent, hChannel, hClass, pAllocParms)
#endif //__nvoc_disp_channel_h_disabled
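/*
 * Editor's note on the __nvoc_disp_channel_h_disabled guards above: when the
 * class is compiled out, callers still build against a stub that asserts at
 * runtime (and, for status-returning functions, reports NV_ERR_NOT_SUPPORTED);
 * when the class is enabled, the public name maps directly onto the _IMPL.
 * A minimal sketch of the same pattern for a hypothetical fooBaz():
 *
 *     NV_STATUS fooBaz_IMPL(struct Foo *pFoo);
 *     #ifdef __nvoc_foo_h_disabled
 *     static inline NV_STATUS fooBaz(struct Foo *pFoo) {
 *         NV_ASSERT_FAILED_PRECOMP("Foo was disabled!");  // loud runtime assert
 *         return NV_ERR_NOT_SUPPORTED;                    // safe fallback status
 *     }
 *     #else
 *     #define fooBaz(pFoo) fooBaz_IMPL(pFoo)              // direct call when enabled
 *     #endif
 */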

NV_STATUS dispchnBindCtx_IMPL(struct OBJGPU *pGpu, struct ContextDma *pContextDma, NvHandle hDispChannel);

#define dispchnBindCtx(pGpu, pContextDma, hDispChannel) dispchnBindCtx_IMPL(pGpu, pContextDma, hDispChannel)

NV_STATUS dispchnUnbindCtx_IMPL(struct OBJGPU *pGpu, struct ContextDma *pContextDma, NvHandle hDispChannel);

#define dispchnUnbindCtx(pGpu, pContextDma, hDispChannel) dispchnUnbindCtx_IMPL(pGpu, pContextDma, hDispChannel)

void dispchnUnbindCtxFromAllChannels_IMPL(struct OBJGPU *pGpu, struct ContextDma *pContextDma);

#define dispchnUnbindCtxFromAllChannels(pGpu, pContextDma) dispchnUnbindCtxFromAllChannels_IMPL(pGpu, pContextDma)

void dispchnUnbindAllCtx_IMPL(struct OBJGPU *pGpu, struct DispChannel *pDispChannel);

#ifdef __nvoc_disp_channel_h_disabled
static inline void dispchnUnbindAllCtx(struct OBJGPU *pGpu, struct DispChannel *pDispChannel) {
    NV_ASSERT_FAILED_PRECOMP("DispChannel was disabled!");
}
#else //__nvoc_disp_channel_h_disabled
#define dispchnUnbindAllCtx(pGpu, pDispChannel) dispchnUnbindAllCtx_IMPL(pGpu, pDispChannel)
#endif //__nvoc_disp_channel_h_disabled

NV_STATUS dispchnGetByHandle_IMPL(struct RsClient *pClient, NvHandle hDisplayChannel, struct DispChannel **ppDispChannel);

#define dispchnGetByHandle(pClient, hDisplayChannel, ppDispChannel) dispchnGetByHandle_IMPL(pClient, hDisplayChannel, ppDispChannel)

#undef PRIVATE_FIELD


/*!
 * RM internal class representing XXX_XXX_CHANNEL_PIO
 */
#ifdef NVOC_DISP_CHANNEL_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif

struct DispChannelPio {
    const struct NVOC_RTTI *__nvoc_rtti;
    struct DispChannel __nvoc_base_DispChannel;
    struct Object *__nvoc_pbase_Object;
    struct RsResource *__nvoc_pbase_RsResource;
    struct RmResourceCommon *__nvoc_pbase_RmResourceCommon;
    struct RmResource *__nvoc_pbase_RmResource;
    struct GpuResource *__nvoc_pbase_GpuResource;
    struct INotifier *__nvoc_pbase_INotifier;
    struct Notifier *__nvoc_pbase_Notifier;
    struct DispChannel *__nvoc_pbase_DispChannel;
    struct DispChannelPio *__nvoc_pbase_DispChannelPio;
    NvBool (*__dispchnpioShareCallback__)(struct DispChannelPio *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *);
    NV_STATUS (*__dispchnpioMapTo__)(struct DispChannelPio *, RS_RES_MAP_TO_PARAMS *);
    NV_STATUS (*__dispchnpioGetOrAllocNotifShare__)(struct DispChannelPio *, NvHandle, NvHandle, struct NotifShare **);
    NV_STATUS (*__dispchnpioCheckMemInterUnmap__)(struct DispChannelPio *, NvBool);
    NV_STATUS (*__dispchnpioGetMapAddrSpace__)(struct DispChannelPio *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *);
    void (*__dispchnpioSetNotificationShare__)(struct DispChannelPio *, struct NotifShare *);
    NvU32 (*__dispchnpioGetRefCount__)(struct DispChannelPio *);
    void (*__dispchnpioAddAdditionalDependants__)(struct RsClient *, struct DispChannelPio *, RsResourceRef *);
    NV_STATUS (*__dispchnpioControl_Prologue__)(struct DispChannelPio *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
    NV_STATUS (*__dispchnpioGetRegBaseOffsetAndSize__)(struct DispChannelPio *, struct OBJGPU *, NvU32 *, NvU32 *);
    NV_STATUS (*__dispchnpioInternalControlForward__)(struct DispChannelPio *, NvU32, void *, NvU32);
    NV_STATUS (*__dispchnpioUnmapFrom__)(struct DispChannelPio *, RS_RES_UNMAP_FROM_PARAMS *);
    void (*__dispchnpioControl_Epilogue__)(struct DispChannelPio *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
    NV_STATUS (*__dispchnpioControlLookup__)(struct DispChannelPio *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **);
    NvHandle (*__dispchnpioGetInternalObjectHandle__)(struct DispChannelPio *);
    NV_STATUS (*__dispchnpioControl__)(struct DispChannelPio *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
    NV_STATUS (*__dispchnpioUnmap__)(struct DispChannelPio *, struct CALL_CONTEXT *, struct RsCpuMapping *);
    NV_STATUS (*__dispchnpioGetMemInterMapParams__)(struct DispChannelPio *, RMRES_MEM_INTER_MAP_PARAMS *);
    NV_STATUS (*__dispchnpioGetMemoryMappingDescriptor__)(struct DispChannelPio *, struct MEMORY_DESCRIPTOR **);
    NV_STATUS (*__dispchnpioControlFilter__)(struct DispChannelPio *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
    NV_STATUS (*__dispchnpioUnregisterEvent__)(struct DispChannelPio *, NvHandle, NvHandle, NvHandle, NvHandle);
    NvBool (*__dispchnpioCanCopy__)(struct DispChannelPio *);
    void (*__dispchnpioPreDestruct__)(struct DispChannelPio *);
    PEVENTNOTIFICATION *(*__dispchnpioGetNotificationListPtr__)(struct DispChannelPio *);
    struct NotifShare *(*__dispchnpioGetNotificationShare__)(struct DispChannelPio *);
    NV_STATUS (*__dispchnpioMap__)(struct DispChannelPio *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *);
    NvBool (*__dispchnpioAccessCallback__)(struct DispChannelPio *, struct RsClient *, void *, RsAccessRight);
};

#ifndef __NVOC_CLASS_DispChannelPio_TYPEDEF__
#define __NVOC_CLASS_DispChannelPio_TYPEDEF__
typedef struct DispChannelPio DispChannelPio;
#endif /* __NVOC_CLASS_DispChannelPio_TYPEDEF__ */

#ifndef __nvoc_class_id_DispChannelPio
#define __nvoc_class_id_DispChannelPio 0x10dec3
#endif /* __nvoc_class_id_DispChannelPio */

extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispChannelPio;

#define __staticCast_DispChannelPio(pThis) \
    ((pThis)->__nvoc_pbase_DispChannelPio)

#ifdef __nvoc_disp_channel_h_disabled
#define __dynamicCast_DispChannelPio(pThis) ((DispChannelPio*)NULL)
#else //__nvoc_disp_channel_h_disabled
#define __dynamicCast_DispChannelPio(pThis) \
    ((DispChannelPio*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(DispChannelPio)))
#endif //__nvoc_disp_channel_h_disabled


NV_STATUS __nvoc_objCreateDynamic_DispChannelPio(DispChannelPio**, Dynamic*, NvU32, va_list);

NV_STATUS __nvoc_objCreate_DispChannelPio(DispChannelPio**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams);
#define __objCreate_DispChannelPio(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \
    __nvoc_objCreate_DispChannelPio((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams)
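/*
 * Editor's note, a hedged usage sketch (illustrative only, not from this
 * header): assuming a valid parent object and allocation params, creation
 * would go through the macro above, which static-casts the parent to Dynamic
 * and forwards to __nvoc_objCreate_DispChannelPio. The pParentObj,
 * pCallContext, and pAllocParams names here are hypothetical placeholders:
 *
 *     DispChannelPio *pChan = NULL;
 *     NV_STATUS status = __objCreate_DispChannelPio(&pChan, pParentObj,
 *                                                   0,  // createFlags
 *                                                   pCallContext, pAllocParams);
 *     if (status != NV_OK)
 *         return status;  // as in the generated creators, the out pointer
 *                         // is only written on success
 */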

#define dispchnpioShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) dispchnpioShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy)
#define dispchnpioMapTo(pResource, pParams) dispchnpioMapTo_DISPATCH(pResource, pParams)
#define dispchnpioGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) dispchnpioGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare)
#define dispchnpioCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) dispchnpioCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided)
#define dispchnpioGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) dispchnpioGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace)
#define dispchnpioSetNotificationShare(pNotifier, pNotifShare) dispchnpioSetNotificationShare_DISPATCH(pNotifier, pNotifShare)
#define dispchnpioGetRefCount(pResource) dispchnpioGetRefCount_DISPATCH(pResource)
#define dispchnpioAddAdditionalDependants(pClient, pResource, pReference) dispchnpioAddAdditionalDependants_DISPATCH(pClient, pResource, pReference)
#define dispchnpioControl_Prologue(pResource, pCallContext, pParams) dispchnpioControl_Prologue_DISPATCH(pResource, pCallContext, pParams)
#define dispchnpioGetRegBaseOffsetAndSize(pDispChannel, pGpu, pOffset, pSize) dispchnpioGetRegBaseOffsetAndSize_DISPATCH(pDispChannel, pGpu, pOffset, pSize)
#define dispchnpioInternalControlForward(pGpuResource, command, pParams, size) dispchnpioInternalControlForward_DISPATCH(pGpuResource, command, pParams, size)
#define dispchnpioUnmapFrom(pResource, pParams) dispchnpioUnmapFrom_DISPATCH(pResource, pParams)
#define dispchnpioControl_Epilogue(pResource, pCallContext, pParams) dispchnpioControl_Epilogue_DISPATCH(pResource, pCallContext, pParams)
#define dispchnpioControlLookup(pResource, pParams, ppEntry) dispchnpioControlLookup_DISPATCH(pResource, pParams, ppEntry)
#define dispchnpioGetInternalObjectHandle(pGpuResource) dispchnpioGetInternalObjectHandle_DISPATCH(pGpuResource)
#define dispchnpioControl(pGpuResource, pCallContext, pParams) dispchnpioControl_DISPATCH(pGpuResource, pCallContext, pParams)
#define dispchnpioUnmap(pGpuResource, pCallContext, pCpuMapping) dispchnpioUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping)
#define dispchnpioGetMemInterMapParams(pRmResource, pParams) dispchnpioGetMemInterMapParams_DISPATCH(pRmResource, pParams)
#define dispchnpioGetMemoryMappingDescriptor(pRmResource, ppMemDesc) dispchnpioGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc)
#define dispchnpioControlFilter(pResource, pCallContext, pParams) dispchnpioControlFilter_DISPATCH(pResource, pCallContext, pParams)
#define dispchnpioUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) dispchnpioUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent)
#define dispchnpioCanCopy(pResource) dispchnpioCanCopy_DISPATCH(pResource)
#define dispchnpioPreDestruct(pResource) dispchnpioPreDestruct_DISPATCH(pResource)
#define dispchnpioGetNotificationListPtr(pNotifier) dispchnpioGetNotificationListPtr_DISPATCH(pNotifier)
#define dispchnpioGetNotificationShare(pNotifier) dispchnpioGetNotificationShare_DISPATCH(pNotifier)
#define dispchnpioMap(pGpuResource, pCallContext, pParams, pCpuMapping) dispchnpioMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping)
#define dispchnpioAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) dispchnpioAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight)

static inline NvBool dispchnpioShareCallback_DISPATCH(struct DispChannelPio *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
    return pGpuResource->__dispchnpioShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy);
}

static inline NV_STATUS dispchnpioMapTo_DISPATCH(struct DispChannelPio *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
    return pResource->__dispchnpioMapTo__(pResource, pParams);
}

static inline NV_STATUS dispchnpioGetOrAllocNotifShare_DISPATCH(struct DispChannelPio *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) {
    return pNotifier->__dispchnpioGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare);
}

static inline NV_STATUS dispchnpioCheckMemInterUnmap_DISPATCH(struct DispChannelPio *pRmResource, NvBool bSubdeviceHandleProvided) {
    return pRmResource->__dispchnpioCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided);
}

static inline NV_STATUS dispchnpioGetMapAddrSpace_DISPATCH(struct DispChannelPio *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) {
    return pGpuResource->__dispchnpioGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace);
}

static inline void dispchnpioSetNotificationShare_DISPATCH(struct DispChannelPio *pNotifier, struct NotifShare *pNotifShare) {
    pNotifier->__dispchnpioSetNotificationShare__(pNotifier, pNotifShare);
}

static inline NvU32 dispchnpioGetRefCount_DISPATCH(struct DispChannelPio *pResource) {
    return pResource->__dispchnpioGetRefCount__(pResource);
}

static inline void dispchnpioAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct DispChannelPio *pResource, RsResourceRef *pReference) {
    pResource->__dispchnpioAddAdditionalDependants__(pClient, pResource, pReference);
}

static inline NV_STATUS dispchnpioControl_Prologue_DISPATCH(struct DispChannelPio *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return pResource->__dispchnpioControl_Prologue__(pResource, pCallContext, pParams);
}

static inline NV_STATUS dispchnpioGetRegBaseOffsetAndSize_DISPATCH(struct DispChannelPio *pDispChannel, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) {
    return pDispChannel->__dispchnpioGetRegBaseOffsetAndSize__(pDispChannel, pGpu, pOffset, pSize);
}

static inline NV_STATUS dispchnpioInternalControlForward_DISPATCH(struct DispChannelPio *pGpuResource, NvU32 command, void *pParams, NvU32 size) {
    return pGpuResource->__dispchnpioInternalControlForward__(pGpuResource, command, pParams, size);
}

static inline NV_STATUS dispchnpioUnmapFrom_DISPATCH(struct DispChannelPio *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
    return pResource->__dispchnpioUnmapFrom__(pResource, pParams);
}

static inline void dispchnpioControl_Epilogue_DISPATCH(struct DispChannelPio *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    pResource->__dispchnpioControl_Epilogue__(pResource, pCallContext, pParams);
}

static inline NV_STATUS dispchnpioControlLookup_DISPATCH(struct DispChannelPio *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) {
    return pResource->__dispchnpioControlLookup__(pResource, pParams, ppEntry);
}

static inline NvHandle dispchnpioGetInternalObjectHandle_DISPATCH(struct DispChannelPio *pGpuResource) {
    return pGpuResource->__dispchnpioGetInternalObjectHandle__(pGpuResource);
}

static inline NV_STATUS dispchnpioControl_DISPATCH(struct DispChannelPio *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return pGpuResource->__dispchnpioControl__(pGpuResource, pCallContext, pParams);
}

static inline NV_STATUS dispchnpioUnmap_DISPATCH(struct DispChannelPio *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) {
    return pGpuResource->__dispchnpioUnmap__(pGpuResource, pCallContext, pCpuMapping);
}

static inline NV_STATUS dispchnpioGetMemInterMapParams_DISPATCH(struct DispChannelPio *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
    return pRmResource->__dispchnpioGetMemInterMapParams__(pRmResource, pParams);
}

static inline NV_STATUS dispchnpioGetMemoryMappingDescriptor_DISPATCH(struct DispChannelPio *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
    return pRmResource->__dispchnpioGetMemoryMappingDescriptor__(pRmResource, ppMemDesc);
}

static inline NV_STATUS dispchnpioControlFilter_DISPATCH(struct DispChannelPio *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return pResource->__dispchnpioControlFilter__(pResource, pCallContext, pParams);
}

static inline NV_STATUS dispchnpioUnregisterEvent_DISPATCH(struct DispChannelPio *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) {
    return pNotifier->__dispchnpioUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent);
}

static inline NvBool dispchnpioCanCopy_DISPATCH(struct DispChannelPio *pResource) {
    return pResource->__dispchnpioCanCopy__(pResource);
}

static inline void dispchnpioPreDestruct_DISPATCH(struct DispChannelPio *pResource) {
    pResource->__dispchnpioPreDestruct__(pResource);
}

static inline PEVENTNOTIFICATION *dispchnpioGetNotificationListPtr_DISPATCH(struct DispChannelPio *pNotifier) {
    return pNotifier->__dispchnpioGetNotificationListPtr__(pNotifier);
}

static inline struct NotifShare *dispchnpioGetNotificationShare_DISPATCH(struct DispChannelPio *pNotifier) {
    return pNotifier->__dispchnpioGetNotificationShare__(pNotifier);
}

static inline NV_STATUS dispchnpioMap_DISPATCH(struct DispChannelPio *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) {
    return pGpuResource->__dispchnpioMap__(pGpuResource, pCallContext, pParams, pCpuMapping);
}

static inline NvBool dispchnpioAccessCallback_DISPATCH(struct DispChannelPio *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
    return pResource->__dispchnpioAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight);
}

NV_STATUS dispchnpioConstruct_IMPL(struct DispChannelPio *arg_pDispChannelPio, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams);

#define __nvoc_dispchnpioConstruct(arg_pDispChannelPio, arg_pCallContext, arg_pParams) dispchnpioConstruct_IMPL(arg_pDispChannelPio, arg_pCallContext, arg_pParams)

#undef PRIVATE_FIELD


/*!
 * RM internal class representing XXX_XXX_CHANNEL_DMA
 */
#ifdef NVOC_DISP_CHANNEL_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif

struct DispChannelDma {
    const struct NVOC_RTTI *__nvoc_rtti;
    struct DispChannel __nvoc_base_DispChannel;
    struct Object *__nvoc_pbase_Object;
    struct RsResource *__nvoc_pbase_RsResource;
    struct RmResourceCommon *__nvoc_pbase_RmResourceCommon;
    struct RmResource *__nvoc_pbase_RmResource;
    struct GpuResource *__nvoc_pbase_GpuResource;
    struct INotifier *__nvoc_pbase_INotifier;
    struct Notifier *__nvoc_pbase_Notifier;
    struct DispChannel *__nvoc_pbase_DispChannel;
    struct DispChannelDma *__nvoc_pbase_DispChannelDma;
    NvBool (*__dispchndmaShareCallback__)(struct DispChannelDma *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *);
    NV_STATUS (*__dispchndmaMapTo__)(struct DispChannelDma *, RS_RES_MAP_TO_PARAMS *);
    NV_STATUS (*__dispchndmaGetOrAllocNotifShare__)(struct DispChannelDma *, NvHandle, NvHandle, struct NotifShare **);
    NV_STATUS (*__dispchndmaCheckMemInterUnmap__)(struct DispChannelDma *, NvBool);
    NV_STATUS (*__dispchndmaGetMapAddrSpace__)(struct DispChannelDma *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *);
    void (*__dispchndmaSetNotificationShare__)(struct DispChannelDma *, struct NotifShare *);
    NvU32 (*__dispchndmaGetRefCount__)(struct DispChannelDma *);
    void (*__dispchndmaAddAdditionalDependants__)(struct RsClient *, struct DispChannelDma *, RsResourceRef *);
    NV_STATUS (*__dispchndmaControl_Prologue__)(struct DispChannelDma *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
    NV_STATUS (*__dispchndmaGetRegBaseOffsetAndSize__)(struct DispChannelDma *, struct OBJGPU *, NvU32 *, NvU32 *);
    NV_STATUS (*__dispchndmaInternalControlForward__)(struct DispChannelDma *, NvU32, void *, NvU32);
    NV_STATUS (*__dispchndmaUnmapFrom__)(struct DispChannelDma *, RS_RES_UNMAP_FROM_PARAMS *);
    void (*__dispchndmaControl_Epilogue__)(struct DispChannelDma *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
    NV_STATUS (*__dispchndmaControlLookup__)(struct DispChannelDma *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **);
    NvHandle (*__dispchndmaGetInternalObjectHandle__)(struct DispChannelDma *);
    NV_STATUS (*__dispchndmaControl__)(struct DispChannelDma *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
    NV_STATUS (*__dispchndmaUnmap__)(struct DispChannelDma *, struct CALL_CONTEXT *, struct RsCpuMapping *);
    NV_STATUS (*__dispchndmaGetMemInterMapParams__)(struct DispChannelDma *, RMRES_MEM_INTER_MAP_PARAMS *);
    NV_STATUS (*__dispchndmaGetMemoryMappingDescriptor__)(struct DispChannelDma *, struct MEMORY_DESCRIPTOR **);
    NV_STATUS (*__dispchndmaControlFilter__)(struct DispChannelDma *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
    NV_STATUS (*__dispchndmaUnregisterEvent__)(struct DispChannelDma *, NvHandle, NvHandle, NvHandle, NvHandle);
    NvBool (*__dispchndmaCanCopy__)(struct DispChannelDma *);
    void (*__dispchndmaPreDestruct__)(struct DispChannelDma *);
    PEVENTNOTIFICATION *(*__dispchndmaGetNotificationListPtr__)(struct DispChannelDma *);
    struct NotifShare *(*__dispchndmaGetNotificationShare__)(struct DispChannelDma *);
    NV_STATUS (*__dispchndmaMap__)(struct DispChannelDma *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *);
    NvBool (*__dispchndmaAccessCallback__)(struct DispChannelDma *, struct RsClient *, void *, RsAccessRight);
};

#ifndef __NVOC_CLASS_DispChannelDma_TYPEDEF__
#define __NVOC_CLASS_DispChannelDma_TYPEDEF__
typedef struct DispChannelDma DispChannelDma;
#endif /* __NVOC_CLASS_DispChannelDma_TYPEDEF__ */

#ifndef __nvoc_class_id_DispChannelDma
#define __nvoc_class_id_DispChannelDma 0xfe3d2e
#endif /* __nvoc_class_id_DispChannelDma */

extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispChannelDma;

#define __staticCast_DispChannelDma(pThis) \
    ((pThis)->__nvoc_pbase_DispChannelDma)

#ifdef __nvoc_disp_channel_h_disabled
#define __dynamicCast_DispChannelDma(pThis) ((DispChannelDma*)NULL)
#else //__nvoc_disp_channel_h_disabled
#define __dynamicCast_DispChannelDma(pThis) \
    ((DispChannelDma*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(DispChannelDma)))
#endif //__nvoc_disp_channel_h_disabled


NV_STATUS __nvoc_objCreateDynamic_DispChannelDma(DispChannelDma**, Dynamic*, NvU32, va_list);

NV_STATUS __nvoc_objCreate_DispChannelDma(DispChannelDma**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams);
#define __objCreate_DispChannelDma(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \
    __nvoc_objCreate_DispChannelDma((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams)

#define dispchndmaShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) dispchndmaShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy)
#define dispchndmaMapTo(pResource, pParams) dispchndmaMapTo_DISPATCH(pResource, pParams)
#define dispchndmaGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) dispchndmaGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare)
#define dispchndmaCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) dispchndmaCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided)
#define dispchndmaGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) dispchndmaGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace)
#define dispchndmaSetNotificationShare(pNotifier, pNotifShare) dispchndmaSetNotificationShare_DISPATCH(pNotifier, pNotifShare)
#define dispchndmaGetRefCount(pResource) dispchndmaGetRefCount_DISPATCH(pResource)
#define dispchndmaAddAdditionalDependants(pClient, pResource, pReference) dispchndmaAddAdditionalDependants_DISPATCH(pClient, pResource, pReference)
#define dispchndmaControl_Prologue(pResource, pCallContext, pParams) dispchndmaControl_Prologue_DISPATCH(pResource, pCallContext, pParams)
#define dispchndmaGetRegBaseOffsetAndSize(pDispChannel, pGpu, pOffset, pSize) dispchndmaGetRegBaseOffsetAndSize_DISPATCH(pDispChannel, pGpu, pOffset, pSize)
#define dispchndmaInternalControlForward(pGpuResource, command, pParams, size) dispchndmaInternalControlForward_DISPATCH(pGpuResource, command, pParams, size)
#define dispchndmaUnmapFrom(pResource, pParams) dispchndmaUnmapFrom_DISPATCH(pResource, pParams)
#define dispchndmaControl_Epilogue(pResource, pCallContext, pParams) dispchndmaControl_Epilogue_DISPATCH(pResource, pCallContext, pParams)
#define dispchndmaControlLookup(pResource, pParams, ppEntry) dispchndmaControlLookup_DISPATCH(pResource, pParams, ppEntry)
#define dispchndmaGetInternalObjectHandle(pGpuResource) dispchndmaGetInternalObjectHandle_DISPATCH(pGpuResource)
#define dispchndmaControl(pGpuResource, pCallContext, pParams) dispchndmaControl_DISPATCH(pGpuResource, pCallContext, pParams)
#define dispchndmaUnmap(pGpuResource, pCallContext, pCpuMapping) dispchndmaUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping)
#define dispchndmaGetMemInterMapParams(pRmResource, pParams) dispchndmaGetMemInterMapParams_DISPATCH(pRmResource, pParams)
#define dispchndmaGetMemoryMappingDescriptor(pRmResource, ppMemDesc) dispchndmaGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc)
#define dispchndmaControlFilter(pResource, pCallContext, pParams) dispchndmaControlFilter_DISPATCH(pResource, pCallContext, pParams)
#define dispchndmaUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) dispchndmaUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent)
#define dispchndmaCanCopy(pResource) dispchndmaCanCopy_DISPATCH(pResource)
#define dispchndmaPreDestruct(pResource) dispchndmaPreDestruct_DISPATCH(pResource)
#define dispchndmaGetNotificationListPtr(pNotifier) dispchndmaGetNotificationListPtr_DISPATCH(pNotifier)
#define dispchndmaGetNotificationShare(pNotifier) dispchndmaGetNotificationShare_DISPATCH(pNotifier)
#define dispchndmaMap(pGpuResource, pCallContext, pParams, pCpuMapping) dispchndmaMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping)
#define dispchndmaAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) dispchndmaAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight)

static inline NvBool dispchndmaShareCallback_DISPATCH(struct DispChannelDma *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
    return pGpuResource->__dispchndmaShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy);
}

static inline NV_STATUS dispchndmaMapTo_DISPATCH(struct DispChannelDma *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
    return pResource->__dispchndmaMapTo__(pResource, pParams);
}

static inline NV_STATUS dispchndmaGetOrAllocNotifShare_DISPATCH(struct DispChannelDma *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) {
    return pNotifier->__dispchndmaGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare);
}

static inline NV_STATUS dispchndmaCheckMemInterUnmap_DISPATCH(struct DispChannelDma *pRmResource, NvBool bSubdeviceHandleProvided) {
    return pRmResource->__dispchndmaCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided);
}

static inline NV_STATUS dispchndmaGetMapAddrSpace_DISPATCH(struct DispChannelDma *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) {
    return pGpuResource->__dispchndmaGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace);
}

static inline void dispchndmaSetNotificationShare_DISPATCH(struct DispChannelDma *pNotifier, struct NotifShare *pNotifShare) {
    pNotifier->__dispchndmaSetNotificationShare__(pNotifier, pNotifShare);
}

static inline NvU32 dispchndmaGetRefCount_DISPATCH(struct DispChannelDma *pResource) {
    return pResource->__dispchndmaGetRefCount__(pResource);
}

static inline void dispchndmaAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct DispChannelDma *pResource, RsResourceRef *pReference) {
    pResource->__dispchndmaAddAdditionalDependants__(pClient, pResource, pReference);
}

static inline NV_STATUS dispchndmaControl_Prologue_DISPATCH(struct DispChannelDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return pResource->__dispchndmaControl_Prologue__(pResource, pCallContext, pParams);
}

static inline NV_STATUS dispchndmaGetRegBaseOffsetAndSize_DISPATCH(struct DispChannelDma *pDispChannel, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) {
    return pDispChannel->__dispchndmaGetRegBaseOffsetAndSize__(pDispChannel, pGpu, pOffset, pSize);
}

static inline NV_STATUS dispchndmaInternalControlForward_DISPATCH(struct DispChannelDma *pGpuResource, NvU32 command, void *pParams, NvU32 size) {
    return pGpuResource->__dispchndmaInternalControlForward__(pGpuResource, command, pParams, size);
}

static inline NV_STATUS dispchndmaUnmapFrom_DISPATCH(struct DispChannelDma *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
    return pResource->__dispchndmaUnmapFrom__(pResource, pParams);
}

static inline void dispchndmaControl_Epilogue_DISPATCH(struct DispChannelDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    pResource->__dispchndmaControl_Epilogue__(pResource, pCallContext, pParams);
}

static inline NV_STATUS dispchndmaControlLookup_DISPATCH(struct DispChannelDma *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) {
    return pResource->__dispchndmaControlLookup__(pResource, pParams, ppEntry);
}

static inline NvHandle dispchndmaGetInternalObjectHandle_DISPATCH(struct DispChannelDma *pGpuResource) {
    return pGpuResource->__dispchndmaGetInternalObjectHandle__(pGpuResource);
}

static inline NV_STATUS dispchndmaControl_DISPATCH(struct DispChannelDma *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return pGpuResource->__dispchndmaControl__(pGpuResource, pCallContext, pParams);
}

static inline NV_STATUS dispchndmaUnmap_DISPATCH(struct DispChannelDma *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) {
    return pGpuResource->__dispchndmaUnmap__(pGpuResource, pCallContext, pCpuMapping);
}

static inline NV_STATUS dispchndmaGetMemInterMapParams_DISPATCH(struct DispChannelDma *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
    return pRmResource->__dispchndmaGetMemInterMapParams__(pRmResource, pParams);
}

static inline NV_STATUS dispchndmaGetMemoryMappingDescriptor_DISPATCH(struct DispChannelDma *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
    return pRmResource->__dispchndmaGetMemoryMappingDescriptor__(pRmResource, ppMemDesc);
}

static inline NV_STATUS dispchndmaControlFilter_DISPATCH(struct DispChannelDma *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return pResource->__dispchndmaControlFilter__(pResource, pCallContext, pParams);
}

static inline NV_STATUS dispchndmaUnregisterEvent_DISPATCH(struct DispChannelDma *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) {
    return pNotifier->__dispchndmaUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent);
}

static inline NvBool dispchndmaCanCopy_DISPATCH(struct DispChannelDma *pResource) {
    return pResource->__dispchndmaCanCopy__(pResource);
}

static inline void dispchndmaPreDestruct_DISPATCH(struct DispChannelDma *pResource) {
    pResource->__dispchndmaPreDestruct__(pResource);
}

static inline PEVENTNOTIFICATION *dispchndmaGetNotificationListPtr_DISPATCH(struct DispChannelDma *pNotifier) {
    return pNotifier->__dispchndmaGetNotificationListPtr__(pNotifier);
}

static inline struct NotifShare *dispchndmaGetNotificationShare_DISPATCH(struct DispChannelDma *pNotifier) {
    return pNotifier->__dispchndmaGetNotificationShare__(pNotifier);
}

static inline NV_STATUS dispchndmaMap_DISPATCH(struct DispChannelDma *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) {
    return pGpuResource->__dispchndmaMap__(pGpuResource, pCallContext, pParams, pCpuMapping);
}

static inline NvBool dispchndmaAccessCallback_DISPATCH(struct DispChannelDma *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
    return pResource->__dispchndmaAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight);
}

NV_STATUS dispchndmaConstruct_IMPL(struct DispChannelDma *arg_pDispChannelDma, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams);

#define __nvoc_dispchndmaConstruct(arg_pDispChannelDma, arg_pCallContext, arg_pParams) dispchndmaConstruct_IMPL(arg_pDispChannelDma, arg_pCallContext, arg_pParams)

#undef PRIVATE_FIELD


#endif // DISP_CHANNEL_H

#ifdef __cplusplus
} // extern "C"
#endif
#endif // _G_DISP_CHANNEL_NVOC_H_
@@ -1,329 +0,0 @@

#define NVOC_DISP_SF_USER_H_PRIVATE_ACCESS_ALLOWED
#include "nvoc/runtime.h"
#include "nvoc/rtti.h"
#include "nvtypes.h"
#include "nvport/nvport.h"
#include "nvport/inline/util_valist.h"
#include "utils/nvassert.h"
#include "g_disp_sf_user_nvoc.h"

#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0xba7439 = 1;
#endif

extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispSfUser;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource;

void __nvoc_init_DispSfUser(DispSfUser*);
void __nvoc_init_funcTable_DispSfUser(DispSfUser*);
NV_STATUS __nvoc_ctor_DispSfUser(DispSfUser*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams);
void __nvoc_init_dataField_DispSfUser(DispSfUser*);
void __nvoc_dtor_DispSfUser(DispSfUser*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_DispSfUser;

static const struct NVOC_RTTI __nvoc_rtti_DispSfUser_DispSfUser = {
    /*pClassDef=*/  &__nvoc_class_def_DispSfUser,
    /*dtor=*/       (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_DispSfUser,
    /*offset=*/     0,
};

static const struct NVOC_RTTI __nvoc_rtti_DispSfUser_Object = {
    /*pClassDef=*/  &__nvoc_class_def_Object,
    /*dtor=*/       &__nvoc_destructFromBase,
    /*offset=*/     NV_OFFSETOF(DispSfUser, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object),
};

static const struct NVOC_RTTI __nvoc_rtti_DispSfUser_RsResource = {
    /*pClassDef=*/  &__nvoc_class_def_RsResource,
    /*dtor=*/       &__nvoc_destructFromBase,
    /*offset=*/     NV_OFFSETOF(DispSfUser, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource),
};

static const struct NVOC_RTTI __nvoc_rtti_DispSfUser_RmResourceCommon = {
    /*pClassDef=*/  &__nvoc_class_def_RmResourceCommon,
    /*dtor=*/       &__nvoc_destructFromBase,
    /*offset=*/     NV_OFFSETOF(DispSfUser, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon),
};

static const struct NVOC_RTTI __nvoc_rtti_DispSfUser_RmResource = {
    /*pClassDef=*/  &__nvoc_class_def_RmResource,
    /*dtor=*/       &__nvoc_destructFromBase,
    /*offset=*/     NV_OFFSETOF(DispSfUser, __nvoc_base_GpuResource.__nvoc_base_RmResource),
};

static const struct NVOC_RTTI __nvoc_rtti_DispSfUser_GpuResource = {
    /*pClassDef=*/  &__nvoc_class_def_GpuResource,
    /*dtor=*/       &__nvoc_destructFromBase,
    /*offset=*/     NV_OFFSETOF(DispSfUser, __nvoc_base_GpuResource),
};

static const struct NVOC_CASTINFO __nvoc_castinfo_DispSfUser = {
    /*numRelatives=*/ 6,
    /*relatives=*/ {
        &__nvoc_rtti_DispSfUser_DispSfUser,
        &__nvoc_rtti_DispSfUser_GpuResource,
        &__nvoc_rtti_DispSfUser_RmResource,
        &__nvoc_rtti_DispSfUser_RmResourceCommon,
        &__nvoc_rtti_DispSfUser_RsResource,
        &__nvoc_rtti_DispSfUser_Object,
    },
};
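/*
 * Editor's note, a minimal sketch (hypothetical helper, not the real NVOC
 * runtime): the castinfo table above is what drives __nvoc_dynamicCast. Each
 * NVOC_RTTI entry records the byte offset of a base-class subobject inside
 * DispSfUser via NV_OFFSETOF, so a dynamic cast reduces to an RTTI lookup
 * plus a pointer adjustment by that recorded offset:
 *
 *     static void *castToBase(void *pDerived, NvU32 baseOffset) {
 *         // derived-to-base: step forward by the recorded subobject offset
 *         return (void *)((unsigned char *)pDerived + baseOffset);
 *     }
 */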

const struct NVOC_CLASS_DEF __nvoc_class_def_DispSfUser =
{
    /*classInfo=*/ {
        /*size=*/               sizeof(DispSfUser),
        /*classId=*/            classId(DispSfUser),
        /*providerId=*/         &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
        /*name=*/               "DispSfUser",
#endif
    },
    /*objCreatefn=*/        (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_DispSfUser,
    /*pCastInfo=*/          &__nvoc_castinfo_DispSfUser,
    /*pExportInfo=*/        &__nvoc_export_info_DispSfUser
};

static NV_STATUS __nvoc_thunk_DispSfUser_gpuresGetRegBaseOffsetAndSize(struct GpuResource *pDispSfUser, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) {
    return dispsfGetRegBaseOffsetAndSize((struct DispSfUser *)(((unsigned char *)pDispSfUser) - __nvoc_rtti_DispSfUser_GpuResource.offset), pGpu, pOffset, pSize);
}

static NvBool __nvoc_thunk_GpuResource_dispsfShareCallback(struct DispSfUser *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
    return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispSfUser_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy);
}

static NV_STATUS __nvoc_thunk_GpuResource_dispsfControl(struct DispSfUser *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return gpuresControl((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispSfUser_GpuResource.offset), pCallContext, pParams);
}

static NV_STATUS __nvoc_thunk_GpuResource_dispsfUnmap(struct DispSfUser *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) {
    return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispSfUser_GpuResource.offset), pCallContext, pCpuMapping);
}

static NV_STATUS __nvoc_thunk_RmResource_dispsfGetMemInterMapParams(struct DispSfUser *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
    return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DispSfUser_RmResource.offset), pParams);
}

static NV_STATUS __nvoc_thunk_RmResource_dispsfGetMemoryMappingDescriptor(struct DispSfUser *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
    return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DispSfUser_RmResource.offset), ppMemDesc);
}

static NV_STATUS __nvoc_thunk_GpuResource_dispsfGetMapAddrSpace(struct DispSfUser *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) {
    return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispSfUser_GpuResource.offset), pCallContext, mapFlags, pAddrSpace);
}

static NvHandle __nvoc_thunk_GpuResource_dispsfGetInternalObjectHandle(struct DispSfUser *pGpuResource) {
    return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispSfUser_GpuResource.offset));
}

static NV_STATUS __nvoc_thunk_RsResource_dispsfControlFilter(struct DispSfUser *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSfUser_RsResource.offset), pCallContext, pParams);
}

static void __nvoc_thunk_RsResource_dispsfAddAdditionalDependants(struct RsClient *pClient, struct DispSfUser *pResource, RsResourceRef *pReference) {
    resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSfUser_RsResource.offset), pReference);
}

static NvU32 __nvoc_thunk_RsResource_dispsfGetRefCount(struct DispSfUser *pResource) {
    return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSfUser_RsResource.offset));
}

static NV_STATUS __nvoc_thunk_RmResource_dispsfCheckMemInterUnmap(struct DispSfUser *pRmResource, NvBool bSubdeviceHandleProvided) {
    return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_DispSfUser_RmResource.offset), bSubdeviceHandleProvided);
}

static NV_STATUS __nvoc_thunk_RsResource_dispsfMapTo(struct DispSfUser *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
    return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSfUser_RsResource.offset), pParams);
}

static NV_STATUS __nvoc_thunk_RmResource_dispsfControl_Prologue(struct DispSfUser *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSfUser_RmResource.offset), pCallContext, pParams);
}

static NvBool __nvoc_thunk_RsResource_dispsfCanCopy(struct DispSfUser *pResource) {
    return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSfUser_RsResource.offset));
}

static NV_STATUS __nvoc_thunk_GpuResource_dispsfInternalControlForward(struct DispSfUser *pGpuResource, NvU32 command, void *pParams, NvU32 size) {
    return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispSfUser_GpuResource.offset), command, pParams, size);
}

static void __nvoc_thunk_RsResource_dispsfPreDestruct(struct DispSfUser *pResource) {
    resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSfUser_RsResource.offset));
}

static NV_STATUS __nvoc_thunk_RsResource_dispsfUnmapFrom(struct DispSfUser *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
    return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSfUser_RsResource.offset), pParams);
}

static void __nvoc_thunk_RmResource_dispsfControl_Epilogue(struct DispSfUser *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSfUser_RmResource.offset), pCallContext, pParams);
}

static NV_STATUS __nvoc_thunk_RsResource_dispsfControlLookup(struct DispSfUser *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) {
    return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSfUser_RsResource.offset), pParams, ppEntry);
}

static NV_STATUS __nvoc_thunk_GpuResource_dispsfMap(struct DispSfUser *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) {
    return gpuresMap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_DispSfUser_GpuResource.offset), pCallContext, pParams, pCpuMapping);
}

static NvBool __nvoc_thunk_RmResource_dispsfAccessCallback(struct DispSfUser *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
    return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_DispSfUser_RmResource.offset), pInvokingClient, pAllocParams, accessRight);
}
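/*
 * Editor's note, a minimal sketch (hypothetical types, same arithmetic as the
 * thunks above): each __nvoc_thunk_* adjusts the object pointer by the RTTI
 * offset and tail-calls the target method. Subtracting the offset walks from
 * a base-class subobject back to the enclosing derived object; adding it
 * walks from the derived object into a base subobject:
 *
 *     static NV_STATUS thunk_Base_derivedOp(struct Base *pBase) {
 *         struct Derived *pDerived =
 *             (struct Derived *)((unsigned char *)pBase - kBaseOffset);
 *         return derivedOp_IMPL(pDerived);  // call with the adjusted pointer
 *     }
 *
 * where kBaseOffset stands in for the recorded NVOC_RTTI offset.
 */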

const struct NVOC_EXPORT_INFO __nvoc_export_info_DispSfUser =
{
    /*numEntries=*/     0,
    /*pExportEntries=*/ 0
};

void __nvoc_dtor_GpuResource(GpuResource*);
void __nvoc_dtor_DispSfUser(DispSfUser *pThis) {
    __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource);
    PORT_UNREFERENCED_VARIABLE(pThis);
}

void __nvoc_init_dataField_DispSfUser(DispSfUser *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);
}

NV_STATUS __nvoc_ctor_GpuResource(GpuResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
NV_STATUS __nvoc_ctor_DispSfUser(DispSfUser *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
    NV_STATUS status = NV_OK;
    status = __nvoc_ctor_GpuResource(&pThis->__nvoc_base_GpuResource, arg_pCallContext, arg_pParams);
    if (status != NV_OK) goto __nvoc_ctor_DispSfUser_fail_GpuResource;
    __nvoc_init_dataField_DispSfUser(pThis);

    status = __nvoc_dispsfConstruct(pThis, arg_pCallContext, arg_pParams);
    if (status != NV_OK) goto __nvoc_ctor_DispSfUser_fail__init;
    goto __nvoc_ctor_DispSfUser_exit; // Success

__nvoc_ctor_DispSfUser_fail__init:
    __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource);
__nvoc_ctor_DispSfUser_fail_GpuResource:
__nvoc_ctor_DispSfUser_exit:

    return status;
}

static void __nvoc_init_funcTable_DispSfUser_1(DispSfUser *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);

    pThis->__dispsfGetRegBaseOffsetAndSize__ = &dispsfGetRegBaseOffsetAndSize_IMPL;

    pThis->__nvoc_base_GpuResource.__gpuresGetRegBaseOffsetAndSize__ = &__nvoc_thunk_DispSfUser_gpuresGetRegBaseOffsetAndSize;

    pThis->__dispsfShareCallback__ = &__nvoc_thunk_GpuResource_dispsfShareCallback;

    pThis->__dispsfControl__ = &__nvoc_thunk_GpuResource_dispsfControl;

    pThis->__dispsfUnmap__ = &__nvoc_thunk_GpuResource_dispsfUnmap;

    pThis->__dispsfGetMemInterMapParams__ = &__nvoc_thunk_RmResource_dispsfGetMemInterMapParams;

    pThis->__dispsfGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_dispsfGetMemoryMappingDescriptor;

    pThis->__dispsfGetMapAddrSpace__ = &__nvoc_thunk_GpuResource_dispsfGetMapAddrSpace;

    pThis->__dispsfGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_dispsfGetInternalObjectHandle;

    pThis->__dispsfControlFilter__ = &__nvoc_thunk_RsResource_dispsfControlFilter;

    pThis->__dispsfAddAdditionalDependants__ = &__nvoc_thunk_RsResource_dispsfAddAdditionalDependants;

    pThis->__dispsfGetRefCount__ = &__nvoc_thunk_RsResource_dispsfGetRefCount;

    pThis->__dispsfCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_dispsfCheckMemInterUnmap;

    pThis->__dispsfMapTo__ = &__nvoc_thunk_RsResource_dispsfMapTo;

    pThis->__dispsfControl_Prologue__ = &__nvoc_thunk_RmResource_dispsfControl_Prologue;

    pThis->__dispsfCanCopy__ = &__nvoc_thunk_RsResource_dispsfCanCopy;

    pThis->__dispsfInternalControlForward__ = &__nvoc_thunk_GpuResource_dispsfInternalControlForward;

    pThis->__dispsfPreDestruct__ = &__nvoc_thunk_RsResource_dispsfPreDestruct;

    pThis->__dispsfUnmapFrom__ = &__nvoc_thunk_RsResource_dispsfUnmapFrom;

    pThis->__dispsfControl_Epilogue__ = &__nvoc_thunk_RmResource_dispsfControl_Epilogue;

    pThis->__dispsfControlLookup__ = &__nvoc_thunk_RsResource_dispsfControlLookup;

    pThis->__dispsfMap__ = &__nvoc_thunk_GpuResource_dispsfMap;

    pThis->__dispsfAccessCallback__ = &__nvoc_thunk_RmResource_dispsfAccessCallback;
}
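/*
 * Editor's note: the func-table wiring above follows one pattern throughout
 * NVOC. A method the class implements itself points at its _IMPL, and a thunk
 * is also patched into the base-class vtable so base-typed calls reach the
 * override; every purely inherited method instead points at a thunk into the
 * base implementation. Sketch of the two directions (hypothetical names):
 *
 *     pThis->__fooBar__ = &fooBar_IMPL;                     // own override
 *     pThis->__nvoc_base_Base.__baseBar__ = &thunk_to_foo;  // base sees it too
 *     pThis->__fooQux__ = &thunk_to_baseQux;                // pure inheritance
 */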
|
||||
|
||||
void __nvoc_init_funcTable_DispSfUser(DispSfUser *pThis) {
|
||||
__nvoc_init_funcTable_DispSfUser_1(pThis);
|
||||
}
|
||||
|
||||
void __nvoc_init_GpuResource(GpuResource*);
|
||||
void __nvoc_init_DispSfUser(DispSfUser *pThis) {
|
||||
pThis->__nvoc_pbase_DispSfUser = pThis;
|
||||
pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object;
|
||||
pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource;
|
||||
pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon;
|
||||
pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource;
|
||||
pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_GpuResource;
|
||||
__nvoc_init_GpuResource(&pThis->__nvoc_base_GpuResource);
|
||||
__nvoc_init_funcTable_DispSfUser(pThis);
|
||||
}
|
||||
|
||||
NV_STATUS __nvoc_objCreate_DispSfUser(DispSfUser **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
|
||||
NV_STATUS status;
|
||||
Object *pParentObj;
|
||||
DispSfUser *pThis;
|
||||
|
||||
pThis = portMemAllocNonPaged(sizeof(DispSfUser));
|
||||
if (pThis == NULL) return NV_ERR_NO_MEMORY;
|
||||
|
||||
portMemSet(pThis, 0, sizeof(DispSfUser));
|
||||
|
||||
__nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_DispSfUser);
|
||||
|
||||
if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
|
||||
{
|
||||
pParentObj = dynamicCast(pParent, Object);
|
||||
objAddChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object);
|
||||
}
|
||||
else
|
||||
{
|
||||
pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL;
|
||||
}
|
||||
|
||||
__nvoc_init_DispSfUser(pThis);
|
||||
status = __nvoc_ctor_DispSfUser(pThis, arg_pCallContext, arg_pParams);
|
||||
if (status != NV_OK) goto __nvoc_objCreate_DispSfUser_cleanup;
|
||||
|
||||
*ppThis = pThis;
|
||||
return NV_OK;
|
||||
|
||||
__nvoc_objCreate_DispSfUser_cleanup:
|
||||
// do not call destructors here since the constructor already called them
|
||||
portMemFree(pThis);
|
||||
return status;
|
||||
}
|
||||
|
||||
NV_STATUS __nvoc_objCreateDynamic_DispSfUser(DispSfUser **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
|
||||
NV_STATUS status;
|
||||
struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *);
|
||||
struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
|
||||
|
||||
status = __nvoc_objCreate_DispSfUser(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams);
|
||||
|
||||
return status;
|
||||
}
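The `__nvoc_thunk_*` functions referenced by the function table above bridge between a derived class and the bases it embeds by adding or subtracting the byte offset recorded in the RTTI tables. A minimal standalone sketch of that pattern, assuming nothing from the driver (the `Base`/`Derived` names and `derivedGetRefCount` are invented for illustration):

#include <stddef.h>
#include <stdio.h>

/* A "base class" with one virtual method, modeled as a function pointer. */
typedef struct Base {
    int (*getRefCount)(struct Base *);
    int refCount;
} Base;

/* A "derived class" embeds its base, the way DispSfUser embeds GpuResource. */
typedef struct Derived {
    Base base;  /* offsetof(Derived, base) plays the role of NV_OFFSETOF(...) */
    int extra;
} Derived;

static int derivedGetRefCount(Base *pBase) {
    /* Thunk direction: recover the derived object from the base pointer
     * by subtracting the recorded member offset. */
    Derived *pDerived = (Derived *)((unsigned char *)pBase - offsetof(Derived, base));
    return pDerived->base.refCount + pDerived->extra;
}

int main(void) {
    Derived d = { .base = { derivedGetRefCount, 1 }, .extra = 41 };
    Base *pBase = &d.base;                   /* "staticCast" to the base  */
    printf("%d\n", pBase->getRefCount(pBase)); /* dispatches to derived impl */
    return 0;
}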
@@ -1,239 +0,0 @@
#ifndef _G_DISP_SF_USER_NVOC_H_
#define _G_DISP_SF_USER_NVOC_H_
#include "nvoc/runtime.h"

#ifdef __cplusplus
extern "C" {
#endif

/*
 * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/******************************************************************************
 *
 *   Description:
 *       This file contains functions managing DispSfUser class.
 *
 ******************************************************************************/

#include "g_disp_sf_user_nvoc.h"

#ifndef DISP_SF_USER_H
#define DISP_SF_USER_H

#include "gpu/gpu_resource.h"

/*!
 * RM internal class representing NVXXXX_DISP_SF_USER
 */
#ifdef NVOC_DISP_SF_USER_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
struct DispSfUser {
    const struct NVOC_RTTI *__nvoc_rtti;
    struct GpuResource __nvoc_base_GpuResource;
    struct Object *__nvoc_pbase_Object;
    struct RsResource *__nvoc_pbase_RsResource;
    struct RmResourceCommon *__nvoc_pbase_RmResourceCommon;
    struct RmResource *__nvoc_pbase_RmResource;
    struct GpuResource *__nvoc_pbase_GpuResource;
    struct DispSfUser *__nvoc_pbase_DispSfUser;
    NV_STATUS (*__dispsfGetRegBaseOffsetAndSize__)(struct DispSfUser *, struct OBJGPU *, NvU32 *, NvU32 *);
    NvBool (*__dispsfShareCallback__)(struct DispSfUser *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *);
    NV_STATUS (*__dispsfControl__)(struct DispSfUser *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
    NV_STATUS (*__dispsfUnmap__)(struct DispSfUser *, struct CALL_CONTEXT *, struct RsCpuMapping *);
    NV_STATUS (*__dispsfGetMemInterMapParams__)(struct DispSfUser *, RMRES_MEM_INTER_MAP_PARAMS *);
    NV_STATUS (*__dispsfGetMemoryMappingDescriptor__)(struct DispSfUser *, struct MEMORY_DESCRIPTOR **);
    NV_STATUS (*__dispsfGetMapAddrSpace__)(struct DispSfUser *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *);
    NvHandle (*__dispsfGetInternalObjectHandle__)(struct DispSfUser *);
    NV_STATUS (*__dispsfControlFilter__)(struct DispSfUser *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
    void (*__dispsfAddAdditionalDependants__)(struct RsClient *, struct DispSfUser *, RsResourceRef *);
    NvU32 (*__dispsfGetRefCount__)(struct DispSfUser *);
    NV_STATUS (*__dispsfCheckMemInterUnmap__)(struct DispSfUser *, NvBool);
    NV_STATUS (*__dispsfMapTo__)(struct DispSfUser *, RS_RES_MAP_TO_PARAMS *);
    NV_STATUS (*__dispsfControl_Prologue__)(struct DispSfUser *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
    NvBool (*__dispsfCanCopy__)(struct DispSfUser *);
    NV_STATUS (*__dispsfInternalControlForward__)(struct DispSfUser *, NvU32, void *, NvU32);
    void (*__dispsfPreDestruct__)(struct DispSfUser *);
    NV_STATUS (*__dispsfUnmapFrom__)(struct DispSfUser *, RS_RES_UNMAP_FROM_PARAMS *);
    void (*__dispsfControl_Epilogue__)(struct DispSfUser *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
    NV_STATUS (*__dispsfControlLookup__)(struct DispSfUser *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **);
    NV_STATUS (*__dispsfMap__)(struct DispSfUser *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *);
    NvBool (*__dispsfAccessCallback__)(struct DispSfUser *, struct RsClient *, void *, RsAccessRight);
    NvU32 ControlOffset;
    NvU32 ControlLength;
};

#ifndef __NVOC_CLASS_DispSfUser_TYPEDEF__
#define __NVOC_CLASS_DispSfUser_TYPEDEF__
typedef struct DispSfUser DispSfUser;
#endif /* __NVOC_CLASS_DispSfUser_TYPEDEF__ */

#ifndef __nvoc_class_id_DispSfUser
#define __nvoc_class_id_DispSfUser 0xba7439
#endif /* __nvoc_class_id_DispSfUser */

extern const struct NVOC_CLASS_DEF __nvoc_class_def_DispSfUser;

#define __staticCast_DispSfUser(pThis) \
    ((pThis)->__nvoc_pbase_DispSfUser)

#ifdef __nvoc_disp_sf_user_h_disabled
#define __dynamicCast_DispSfUser(pThis) ((DispSfUser*)NULL)
#else //__nvoc_disp_sf_user_h_disabled
#define __dynamicCast_DispSfUser(pThis) \
    ((DispSfUser*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(DispSfUser)))
#endif //__nvoc_disp_sf_user_h_disabled


NV_STATUS __nvoc_objCreateDynamic_DispSfUser(DispSfUser**, Dynamic*, NvU32, va_list);

NV_STATUS __nvoc_objCreate_DispSfUser(DispSfUser**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams);
#define __objCreate_DispSfUser(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \
    __nvoc_objCreate_DispSfUser((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams)

#define dispsfGetRegBaseOffsetAndSize(pDispSfUser, pGpu, pOffset, pSize) dispsfGetRegBaseOffsetAndSize_DISPATCH(pDispSfUser, pGpu, pOffset, pSize)
#define dispsfShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) dispsfShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy)
#define dispsfControl(pGpuResource, pCallContext, pParams) dispsfControl_DISPATCH(pGpuResource, pCallContext, pParams)
#define dispsfUnmap(pGpuResource, pCallContext, pCpuMapping) dispsfUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping)
#define dispsfGetMemInterMapParams(pRmResource, pParams) dispsfGetMemInterMapParams_DISPATCH(pRmResource, pParams)
#define dispsfGetMemoryMappingDescriptor(pRmResource, ppMemDesc) dispsfGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc)
#define dispsfGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) dispsfGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace)
#define dispsfGetInternalObjectHandle(pGpuResource) dispsfGetInternalObjectHandle_DISPATCH(pGpuResource)
#define dispsfControlFilter(pResource, pCallContext, pParams) dispsfControlFilter_DISPATCH(pResource, pCallContext, pParams)
#define dispsfAddAdditionalDependants(pClient, pResource, pReference) dispsfAddAdditionalDependants_DISPATCH(pClient, pResource, pReference)
#define dispsfGetRefCount(pResource) dispsfGetRefCount_DISPATCH(pResource)
#define dispsfCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) dispsfCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided)
#define dispsfMapTo(pResource, pParams) dispsfMapTo_DISPATCH(pResource, pParams)
#define dispsfControl_Prologue(pResource, pCallContext, pParams) dispsfControl_Prologue_DISPATCH(pResource, pCallContext, pParams)
#define dispsfCanCopy(pResource) dispsfCanCopy_DISPATCH(pResource)
#define dispsfInternalControlForward(pGpuResource, command, pParams, size) dispsfInternalControlForward_DISPATCH(pGpuResource, command, pParams, size)
#define dispsfPreDestruct(pResource) dispsfPreDestruct_DISPATCH(pResource)
#define dispsfUnmapFrom(pResource, pParams) dispsfUnmapFrom_DISPATCH(pResource, pParams)
#define dispsfControl_Epilogue(pResource, pCallContext, pParams) dispsfControl_Epilogue_DISPATCH(pResource, pCallContext, pParams)
#define dispsfControlLookup(pResource, pParams, ppEntry) dispsfControlLookup_DISPATCH(pResource, pParams, ppEntry)
#define dispsfMap(pGpuResource, pCallContext, pParams, pCpuMapping) dispsfMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping)
#define dispsfAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) dispsfAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight)
NV_STATUS dispsfGetRegBaseOffsetAndSize_IMPL(struct DispSfUser *pDispSfUser, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize);

static inline NV_STATUS dispsfGetRegBaseOffsetAndSize_DISPATCH(struct DispSfUser *pDispSfUser, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) {
    return pDispSfUser->__dispsfGetRegBaseOffsetAndSize__(pDispSfUser, pGpu, pOffset, pSize);
}

static inline NvBool dispsfShareCallback_DISPATCH(struct DispSfUser *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
    return pGpuResource->__dispsfShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy);
}

static inline NV_STATUS dispsfControl_DISPATCH(struct DispSfUser *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return pGpuResource->__dispsfControl__(pGpuResource, pCallContext, pParams);
}

static inline NV_STATUS dispsfUnmap_DISPATCH(struct DispSfUser *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) {
    return pGpuResource->__dispsfUnmap__(pGpuResource, pCallContext, pCpuMapping);
}

static inline NV_STATUS dispsfGetMemInterMapParams_DISPATCH(struct DispSfUser *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
    return pRmResource->__dispsfGetMemInterMapParams__(pRmResource, pParams);
}

static inline NV_STATUS dispsfGetMemoryMappingDescriptor_DISPATCH(struct DispSfUser *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
    return pRmResource->__dispsfGetMemoryMappingDescriptor__(pRmResource, ppMemDesc);
}

static inline NV_STATUS dispsfGetMapAddrSpace_DISPATCH(struct DispSfUser *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) {
    return pGpuResource->__dispsfGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace);
}

static inline NvHandle dispsfGetInternalObjectHandle_DISPATCH(struct DispSfUser *pGpuResource) {
    return pGpuResource->__dispsfGetInternalObjectHandle__(pGpuResource);
}

static inline NV_STATUS dispsfControlFilter_DISPATCH(struct DispSfUser *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return pResource->__dispsfControlFilter__(pResource, pCallContext, pParams);
}

static inline void dispsfAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct DispSfUser *pResource, RsResourceRef *pReference) {
    pResource->__dispsfAddAdditionalDependants__(pClient, pResource, pReference);
}

static inline NvU32 dispsfGetRefCount_DISPATCH(struct DispSfUser *pResource) {
    return pResource->__dispsfGetRefCount__(pResource);
}

static inline NV_STATUS dispsfCheckMemInterUnmap_DISPATCH(struct DispSfUser *pRmResource, NvBool bSubdeviceHandleProvided) {
    return pRmResource->__dispsfCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided);
}

static inline NV_STATUS dispsfMapTo_DISPATCH(struct DispSfUser *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
    return pResource->__dispsfMapTo__(pResource, pParams);
}

static inline NV_STATUS dispsfControl_Prologue_DISPATCH(struct DispSfUser *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return pResource->__dispsfControl_Prologue__(pResource, pCallContext, pParams);
}

static inline NvBool dispsfCanCopy_DISPATCH(struct DispSfUser *pResource) {
    return pResource->__dispsfCanCopy__(pResource);
}

static inline NV_STATUS dispsfInternalControlForward_DISPATCH(struct DispSfUser *pGpuResource, NvU32 command, void *pParams, NvU32 size) {
    return pGpuResource->__dispsfInternalControlForward__(pGpuResource, command, pParams, size);
}

static inline void dispsfPreDestruct_DISPATCH(struct DispSfUser *pResource) {
    pResource->__dispsfPreDestruct__(pResource);
}

static inline NV_STATUS dispsfUnmapFrom_DISPATCH(struct DispSfUser *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
    return pResource->__dispsfUnmapFrom__(pResource, pParams);
}

static inline void dispsfControl_Epilogue_DISPATCH(struct DispSfUser *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    pResource->__dispsfControl_Epilogue__(pResource, pCallContext, pParams);
}

static inline NV_STATUS dispsfControlLookup_DISPATCH(struct DispSfUser *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) {
    return pResource->__dispsfControlLookup__(pResource, pParams, ppEntry);
}

static inline NV_STATUS dispsfMap_DISPATCH(struct DispSfUser *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) {
    return pGpuResource->__dispsfMap__(pGpuResource, pCallContext, pParams, pCpuMapping);
}

static inline NvBool dispsfAccessCallback_DISPATCH(struct DispSfUser *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
    return pResource->__dispsfAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight);
}

NV_STATUS dispsfConstruct_IMPL(struct DispSfUser *arg_pDispSfUser, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams);
#define __nvoc_dispsfConstruct(arg_pDispSfUser, arg_pCallContext, arg_pParams) dispsfConstruct_IMPL(arg_pDispSfUser, arg_pCallContext, arg_pParams)
#undef PRIVATE_FIELD


#endif // DISP_SF_USER_H

#ifdef __cplusplus
} // extern "C"
#endif
#endif // _G_DISP_SF_USER_NVOC_H_
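Taken together, the header declares everything a caller needs: the creation macro allocates and constructs the object, and the `dispsf*` macros route through the per-object function table filled in by `__nvoc_init_funcTable_DispSfUser_1`. A minimal sketch of that flow, assuming a driver build context; the wrapper name `exampleCreateDispSfUser` and its arguments are hypothetical, while the called identifiers are the ones declared above:

// Hypothetical helper, for illustration only: assumes a valid parent Dynamic*,
// plus the CALL_CONTEXT/RS_RES_ALLOC_PARAMS_INTERNAL of an ongoing allocation.
static NV_STATUS exampleCreateDispSfUser(Dynamic *pParent,
                                         struct CALL_CONTEXT *pCallContext,
                                         struct RS_RES_ALLOC_PARAMS_INTERNAL *pParams,
                                         struct OBJGPU *pGpu)
{
    DispSfUser *pDispSf = NULL;
    NvU32 offset = 0, size = 0;

    // Allocates, runs RTTI/vtable init, then the dispsfConstruct_IMPL ctor.
    NV_STATUS status = __objCreate_DispSfUser(&pDispSf, pParent, 0,
                                              pCallContext, pParams);
    if (status != NV_OK)
        return status;

    // Virtual call: routed through pDispSf->__dispsfGetRegBaseOffsetAndSize__,
    // which the generated funcTable init pointed at the _IMPL above.
    status = dispsfGetRegBaseOffsetAndSize(pDispSf, pGpu, &offset, &size);
    return status;
}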
@@ -1,189 +0,0 @@
#define NVOC_ENG_STATE_H_PRIVATE_ACCESS_ALLOWED
#include "nvoc/runtime.h"
#include "nvoc/rtti.h"
#include "nvtypes.h"
#include "nvport/nvport.h"
#include "nvport/inline/util_valist.h"
#include "utils/nvassert.h"
#include "g_eng_state_nvoc.h"

#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0x7a7ed6 = 1;
#endif

extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJENGSTATE;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;

void __nvoc_init_OBJENGSTATE(OBJENGSTATE*);
void __nvoc_init_funcTable_OBJENGSTATE(OBJENGSTATE*);
NV_STATUS __nvoc_ctor_OBJENGSTATE(OBJENGSTATE*);
void __nvoc_init_dataField_OBJENGSTATE(OBJENGSTATE*);
void __nvoc_dtor_OBJENGSTATE(OBJENGSTATE*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJENGSTATE;

static const struct NVOC_RTTI __nvoc_rtti_OBJENGSTATE_OBJENGSTATE = {
    /*pClassDef=*/ &__nvoc_class_def_OBJENGSTATE,
    /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJENGSTATE,
    /*offset=*/ 0,
};

static const struct NVOC_RTTI __nvoc_rtti_OBJENGSTATE_Object = {
    /*pClassDef=*/ &__nvoc_class_def_Object,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(OBJENGSTATE, __nvoc_base_Object),
};

static const struct NVOC_CASTINFO __nvoc_castinfo_OBJENGSTATE = {
    /*numRelatives=*/ 2,
    /*relatives=*/ {
        &__nvoc_rtti_OBJENGSTATE_OBJENGSTATE,
        &__nvoc_rtti_OBJENGSTATE_Object,
    },
};

const struct NVOC_CLASS_DEF __nvoc_class_def_OBJENGSTATE =
{
    /*classInfo=*/ {
        /*size=*/ sizeof(OBJENGSTATE),
        /*classId=*/ classId(OBJENGSTATE),
        /*providerId=*/ &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
        /*name=*/ "OBJENGSTATE",
#endif
    },
    /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_OBJENGSTATE,
    /*pCastInfo=*/ &__nvoc_castinfo_OBJENGSTATE,
    /*pExportInfo=*/ &__nvoc_export_info_OBJENGSTATE
};

const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJENGSTATE =
{
    /*numEntries=*/ 0,
    /*pExportEntries=*/ 0
};

void __nvoc_dtor_Object(Object*);
void __nvoc_dtor_OBJENGSTATE(OBJENGSTATE *pThis) {
    __nvoc_engstateDestruct(pThis);
    __nvoc_dtor_Object(&pThis->__nvoc_base_Object);
    PORT_UNREFERENCED_VARIABLE(pThis);
}

void __nvoc_init_dataField_OBJENGSTATE(OBJENGSTATE *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);
}

NV_STATUS __nvoc_ctor_Object(Object* );
NV_STATUS __nvoc_ctor_OBJENGSTATE(OBJENGSTATE *pThis) {
    NV_STATUS status = NV_OK;
    status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object);
    if (status != NV_OK) goto __nvoc_ctor_OBJENGSTATE_fail_Object;
    __nvoc_init_dataField_OBJENGSTATE(pThis);
    goto __nvoc_ctor_OBJENGSTATE_exit; // Success

__nvoc_ctor_OBJENGSTATE_fail_Object:
__nvoc_ctor_OBJENGSTATE_exit:

    return status;
}

static void __nvoc_init_funcTable_OBJENGSTATE_1(OBJENGSTATE *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);

    pThis->__engstateConstructEngine__ = &engstateConstructEngine_IMPL;

    pThis->__engstateInitMissing__ = &engstateInitMissing_IMPL;

    pThis->__engstateStatePreInitLocked__ = &engstateStatePreInitLocked_IMPL;

    pThis->__engstateStatePreInitUnlocked__ = &engstateStatePreInitUnlocked_IMPL;

    pThis->__engstateStateInitLocked__ = &engstateStateInitLocked_IMPL;

    pThis->__engstateStateInitUnlocked__ = &engstateStateInitUnlocked_IMPL;

    pThis->__engstateStatePreLoad__ = &engstateStatePreLoad_IMPL;

    pThis->__engstateStateLoad__ = &engstateStateLoad_IMPL;

    pThis->__engstateStatePostLoad__ = &engstateStatePostLoad_IMPL;

    pThis->__engstateStatePreUnload__ = &engstateStatePreUnload_IMPL;

    pThis->__engstateStateUnload__ = &engstateStateUnload_IMPL;

    pThis->__engstateStatePostUnload__ = &engstateStatePostUnload_IMPL;

    pThis->__engstateStateDestroy__ = &engstateStateDestroy_IMPL;

    pThis->__engstateAllocTunableState__ = &engstateAllocTunableState_IMPL;

    pThis->__engstateFreeTunableState__ = &engstateFreeTunableState_IMPL;

    pThis->__engstateGetTunableState__ = &engstateGetTunableState_IMPL;

    pThis->__engstateSetTunableState__ = &engstateSetTunableState_IMPL;

    pThis->__engstateReconcileTunableState__ = &engstateReconcileTunableState_IMPL;

    pThis->__engstateCompareTunableState__ = &engstateCompareTunableState_IMPL;

    pThis->__engstateIsPresent__ = &engstateIsPresent_IMPL;
}

void __nvoc_init_funcTable_OBJENGSTATE(OBJENGSTATE *pThis) {
    __nvoc_init_funcTable_OBJENGSTATE_1(pThis);
}

void __nvoc_init_Object(Object*);
void __nvoc_init_OBJENGSTATE(OBJENGSTATE *pThis) {
    pThis->__nvoc_pbase_OBJENGSTATE = pThis;
    pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object;
    __nvoc_init_Object(&pThis->__nvoc_base_Object);
    __nvoc_init_funcTable_OBJENGSTATE(pThis);
}

NV_STATUS __nvoc_objCreate_OBJENGSTATE(OBJENGSTATE **ppThis, Dynamic *pParent, NvU32 createFlags) {
    NV_STATUS status;
    Object *pParentObj;
    OBJENGSTATE *pThis;

    pThis = portMemAllocNonPaged(sizeof(OBJENGSTATE));
    if (pThis == NULL) return NV_ERR_NO_MEMORY;

    portMemSet(pThis, 0, sizeof(OBJENGSTATE));

    __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_OBJENGSTATE);

    if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
    {
        pParentObj = dynamicCast(pParent, Object);
        objAddChild(pParentObj, &pThis->__nvoc_base_Object);
    }
    else
    {
        pThis->__nvoc_base_Object.pParent = NULL;
    }

    __nvoc_init_OBJENGSTATE(pThis);
    status = __nvoc_ctor_OBJENGSTATE(pThis);
    if (status != NV_OK) goto __nvoc_objCreate_OBJENGSTATE_cleanup;

    *ppThis = pThis;
    return NV_OK;

__nvoc_objCreate_OBJENGSTATE_cleanup:
    // do not call destructors here since the constructor already called them
    portMemFree(pThis);
    return status;
}

NV_STATUS __nvoc_objCreateDynamic_OBJENGSTATE(OBJENGSTATE **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
    NV_STATUS status;

    status = __nvoc_objCreate_OBJENGSTATE(ppThis, pParent, createFlags);

    return status;
}
@@ -1,385 +0,0 @@
#ifndef _G_ENG_STATE_NVOC_H_
#define _G_ENG_STATE_NVOC_H_
#include "nvoc/runtime.h"

#ifdef __cplusplus
extern "C" {
#endif

/*
 * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include "g_eng_state_nvoc.h"

#ifndef _ENG_STATE_H_
#define _ENG_STATE_H_

/*!
 * @file eng_state.h
 * @brief Provides definitions for all OBJENGSTATE data structures and interfaces.
 */

#include "core/core.h"
#include "gpu/eng_desc.h"

typedef enum ENGSTATE_STATE
{
    ENGSTATE_STATE_UNDEFINED = 0,
    ENGSTATE_STATE_CONSTRUCT,
    ENGSTATE_STATE_PRE_INIT,
    ENGSTATE_STATE_INIT,
    ENGSTATE_STATE_PRE_LOAD,
    ENGSTATE_STATE_LOAD,
    ENGSTATE_STATE_POST_LOAD,
    ENGSTATE_STATE_PRE_UNLOAD,
    ENGSTATE_STATE_UNLOAD,
    ENGSTATE_STATE_POST_UNLOAD,
    ENGSTATE_STATE_DESTROY,
    ENGSTATE_STATE_COUNT // Keep this last
} ENGSTATE_STATE;

// Stats data stored for every state transition
typedef struct ENGSTATE_STATS
{
    NvS32 memoryAllocCount;
    NvS32 memoryAllocSize;
    NvU32 transitionTimeUs;
} ENGSTATE_STATS;

// Temporary transition data, not stored
typedef struct ENGSTATE_TRANSITION_DATA
{
    NvS64 memoryAllocCount;
    NvS64 memoryAllocSize;
    NvU64 transitionStartTimeNs;
} ENGSTATE_TRANSITION_DATA;

typedef struct OBJENGSTATE *POBJENGSTATE;

#define ENG_GET_FIFO(p) (engstateGetFifo(staticCast((p), OBJENGSTATE)))
#define ENG_GET_ENG_DESC(p) (staticCast((p), OBJENGSTATE)->engDesc)


/*!
 * Defines the structure used to contain all generic information related to
 * the OBJENGSTATE.
 */
#ifdef NVOC_ENG_STATE_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
struct OBJENGSTATE {
    const struct NVOC_RTTI *__nvoc_rtti;
    struct Object __nvoc_base_Object;
    struct Object *__nvoc_pbase_Object;
    struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE;
    NV_STATUS (*__engstateConstructEngine__)(POBJGPU, POBJENGSTATE, ENGDESCRIPTOR);
    void (*__engstateInitMissing__)(POBJGPU, POBJENGSTATE);
    NV_STATUS (*__engstateStatePreInitLocked__)(POBJGPU, POBJENGSTATE);
    NV_STATUS (*__engstateStatePreInitUnlocked__)(POBJGPU, POBJENGSTATE);
    NV_STATUS (*__engstateStateInitLocked__)(POBJGPU, POBJENGSTATE);
    NV_STATUS (*__engstateStateInitUnlocked__)(POBJGPU, POBJENGSTATE);
    NV_STATUS (*__engstateStatePreLoad__)(POBJGPU, POBJENGSTATE, NvU32);
    NV_STATUS (*__engstateStateLoad__)(POBJGPU, POBJENGSTATE, NvU32);
    NV_STATUS (*__engstateStatePostLoad__)(POBJGPU, POBJENGSTATE, NvU32);
    NV_STATUS (*__engstateStatePreUnload__)(POBJGPU, POBJENGSTATE, NvU32);
    NV_STATUS (*__engstateStateUnload__)(POBJGPU, POBJENGSTATE, NvU32);
    NV_STATUS (*__engstateStatePostUnload__)(POBJGPU, POBJENGSTATE, NvU32);
    void (*__engstateStateDestroy__)(POBJGPU, POBJENGSTATE);
    NV_STATUS (*__engstateAllocTunableState__)(POBJGPU, POBJENGSTATE, void **);
    void (*__engstateFreeTunableState__)(POBJGPU, POBJENGSTATE, void *);
    NV_STATUS (*__engstateGetTunableState__)(POBJGPU, POBJENGSTATE, void *);
    NV_STATUS (*__engstateSetTunableState__)(POBJGPU, POBJENGSTATE, void *);
    NV_STATUS (*__engstateReconcileTunableState__)(POBJGPU, POBJENGSTATE, void *);
    NV_STATUS (*__engstateCompareTunableState__)(POBJGPU, POBJENGSTATE, void *, void *);
    NvBool (*__engstateIsPresent__)(POBJGPU, POBJENGSTATE);
    NvBool PDB_PROP_ENGSTATE_IS_MISSING;
    ENGDESCRIPTOR engDesc;
    void *pOriginalTunableState;
    struct OBJGPU *pGpu;
    ENGSTATE_STATE currentState;
    ENGSTATE_STATS stats[11];
    char name[100];
};

#ifndef __NVOC_CLASS_OBJENGSTATE_TYPEDEF__
#define __NVOC_CLASS_OBJENGSTATE_TYPEDEF__
typedef struct OBJENGSTATE OBJENGSTATE;
#endif /* __NVOC_CLASS_OBJENGSTATE_TYPEDEF__ */

#ifndef __nvoc_class_id_OBJENGSTATE
#define __nvoc_class_id_OBJENGSTATE 0x7a7ed6
#endif /* __nvoc_class_id_OBJENGSTATE */

extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJENGSTATE;

#define __staticCast_OBJENGSTATE(pThis) \
    ((pThis)->__nvoc_pbase_OBJENGSTATE)

#ifdef __nvoc_eng_state_h_disabled
#define __dynamicCast_OBJENGSTATE(pThis) ((OBJENGSTATE*)NULL)
#else //__nvoc_eng_state_h_disabled
#define __dynamicCast_OBJENGSTATE(pThis) \
    ((OBJENGSTATE*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OBJENGSTATE)))
#endif //__nvoc_eng_state_h_disabled

#define PDB_PROP_ENGSTATE_IS_MISSING_BASE_CAST
#define PDB_PROP_ENGSTATE_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING

NV_STATUS __nvoc_objCreateDynamic_OBJENGSTATE(OBJENGSTATE**, Dynamic*, NvU32, va_list);

NV_STATUS __nvoc_objCreate_OBJENGSTATE(OBJENGSTATE**, Dynamic*, NvU32);
#define __objCreate_OBJENGSTATE(ppNewObj, pParent, createFlags) \
    __nvoc_objCreate_OBJENGSTATE((ppNewObj), staticCast((pParent), Dynamic), (createFlags))

#define engstateConstructEngine(pGpu, pEngstate, arg0) engstateConstructEngine_DISPATCH(pGpu, pEngstate, arg0)
#define engstateInitMissing(pGpu, pEngstate) engstateInitMissing_DISPATCH(pGpu, pEngstate)
#define engstateStatePreInitLocked(pGpu, pEngstate) engstateStatePreInitLocked_DISPATCH(pGpu, pEngstate)
#define engstateStatePreInitUnlocked(pGpu, pEngstate) engstateStatePreInitUnlocked_DISPATCH(pGpu, pEngstate)
#define engstateStateInitLocked(pGpu, pEngstate) engstateStateInitLocked_DISPATCH(pGpu, pEngstate)
#define engstateStateInitUnlocked(pGpu, pEngstate) engstateStateInitUnlocked_DISPATCH(pGpu, pEngstate)
#define engstateStatePreLoad(pGpu, pEngstate, arg0) engstateStatePreLoad_DISPATCH(pGpu, pEngstate, arg0)
#define engstateStateLoad(pGpu, pEngstate, arg0) engstateStateLoad_DISPATCH(pGpu, pEngstate, arg0)
#define engstateStatePostLoad(pGpu, pEngstate, arg0) engstateStatePostLoad_DISPATCH(pGpu, pEngstate, arg0)
#define engstateStatePreUnload(pGpu, pEngstate, arg0) engstateStatePreUnload_DISPATCH(pGpu, pEngstate, arg0)
#define engstateStateUnload(pGpu, pEngstate, arg0) engstateStateUnload_DISPATCH(pGpu, pEngstate, arg0)
#define engstateStatePostUnload(pGpu, pEngstate, arg0) engstateStatePostUnload_DISPATCH(pGpu, pEngstate, arg0)
#define engstateStateDestroy(pGpu, pEngstate) engstateStateDestroy_DISPATCH(pGpu, pEngstate)
#define engstateAllocTunableState(pGpu, pEngstate, ppTunableState) engstateAllocTunableState_DISPATCH(pGpu, pEngstate, ppTunableState)
#define engstateFreeTunableState(pGpu, pEngstate, pTunableState) engstateFreeTunableState_DISPATCH(pGpu, pEngstate, pTunableState)
#define engstateGetTunableState(pGpu, pEngstate, pTunableState) engstateGetTunableState_DISPATCH(pGpu, pEngstate, pTunableState)
#define engstateSetTunableState(pGpu, pEngstate, pTunableState) engstateSetTunableState_DISPATCH(pGpu, pEngstate, pTunableState)
#define engstateReconcileTunableState(pGpu, pEngstate, pTunableState) engstateReconcileTunableState_DISPATCH(pGpu, pEngstate, pTunableState)
#define engstateCompareTunableState(pGpu, pEngstate, pTunables1, pTunables2) engstateCompareTunableState_DISPATCH(pGpu, pEngstate, pTunables1, pTunables2)
#define engstateIsPresent(pGpu, pEngstate) engstateIsPresent_DISPATCH(pGpu, pEngstate)
NV_STATUS engstateConstructEngine_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate, ENGDESCRIPTOR arg0);

static inline NV_STATUS engstateConstructEngine_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate, ENGDESCRIPTOR arg0) {
    return pEngstate->__engstateConstructEngine__(pGpu, pEngstate, arg0);
}

void engstateInitMissing_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate);

static inline void engstateInitMissing_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate) {
    pEngstate->__engstateInitMissing__(pGpu, pEngstate);
}

NV_STATUS engstateStatePreInitLocked_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate);

static inline NV_STATUS engstateStatePreInitLocked_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate) {
    return pEngstate->__engstateStatePreInitLocked__(pGpu, pEngstate);
}

NV_STATUS engstateStatePreInitUnlocked_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate);

static inline NV_STATUS engstateStatePreInitUnlocked_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate) {
    return pEngstate->__engstateStatePreInitUnlocked__(pGpu, pEngstate);
}

NV_STATUS engstateStateInitLocked_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate);

static inline NV_STATUS engstateStateInitLocked_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate) {
    return pEngstate->__engstateStateInitLocked__(pGpu, pEngstate);
}

NV_STATUS engstateStateInitUnlocked_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate);

static inline NV_STATUS engstateStateInitUnlocked_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate) {
    return pEngstate->__engstateStateInitUnlocked__(pGpu, pEngstate);
}

NV_STATUS engstateStatePreLoad_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate, NvU32 arg0);

static inline NV_STATUS engstateStatePreLoad_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate, NvU32 arg0) {
    return pEngstate->__engstateStatePreLoad__(pGpu, pEngstate, arg0);
}

NV_STATUS engstateStateLoad_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate, NvU32 arg0);

static inline NV_STATUS engstateStateLoad_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate, NvU32 arg0) {
    return pEngstate->__engstateStateLoad__(pGpu, pEngstate, arg0);
}

NV_STATUS engstateStatePostLoad_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate, NvU32 arg0);

static inline NV_STATUS engstateStatePostLoad_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate, NvU32 arg0) {
    return pEngstate->__engstateStatePostLoad__(pGpu, pEngstate, arg0);
}

NV_STATUS engstateStatePreUnload_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate, NvU32 arg0);

static inline NV_STATUS engstateStatePreUnload_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate, NvU32 arg0) {
    return pEngstate->__engstateStatePreUnload__(pGpu, pEngstate, arg0);
}

NV_STATUS engstateStateUnload_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate, NvU32 arg0);

static inline NV_STATUS engstateStateUnload_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate, NvU32 arg0) {
    return pEngstate->__engstateStateUnload__(pGpu, pEngstate, arg0);
}

NV_STATUS engstateStatePostUnload_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate, NvU32 arg0);

static inline NV_STATUS engstateStatePostUnload_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate, NvU32 arg0) {
    return pEngstate->__engstateStatePostUnload__(pGpu, pEngstate, arg0);
}

void engstateStateDestroy_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate);

static inline void engstateStateDestroy_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate) {
    pEngstate->__engstateStateDestroy__(pGpu, pEngstate);
}

NV_STATUS engstateAllocTunableState_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate, void **ppTunableState);

static inline NV_STATUS engstateAllocTunableState_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate, void **ppTunableState) {
    return pEngstate->__engstateAllocTunableState__(pGpu, pEngstate, ppTunableState);
}

void engstateFreeTunableState_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate, void *pTunableState);

static inline void engstateFreeTunableState_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate, void *pTunableState) {
    pEngstate->__engstateFreeTunableState__(pGpu, pEngstate, pTunableState);
}

NV_STATUS engstateGetTunableState_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate, void *pTunableState);

static inline NV_STATUS engstateGetTunableState_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate, void *pTunableState) {
    return pEngstate->__engstateGetTunableState__(pGpu, pEngstate, pTunableState);
}

NV_STATUS engstateSetTunableState_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate, void *pTunableState);

static inline NV_STATUS engstateSetTunableState_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate, void *pTunableState) {
    return pEngstate->__engstateSetTunableState__(pGpu, pEngstate, pTunableState);
}

NV_STATUS engstateReconcileTunableState_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate, void *pTunableState);

static inline NV_STATUS engstateReconcileTunableState_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate, void *pTunableState) {
    return pEngstate->__engstateReconcileTunableState__(pGpu, pEngstate, pTunableState);
}

NV_STATUS engstateCompareTunableState_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate, void *pTunables1, void *pTunables2);

static inline NV_STATUS engstateCompareTunableState_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate, void *pTunables1, void *pTunables2) {
    return pEngstate->__engstateCompareTunableState__(pGpu, pEngstate, pTunables1, pTunables2);
}

NvBool engstateIsPresent_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate);

static inline NvBool engstateIsPresent_DISPATCH(POBJGPU pGpu, POBJENGSTATE pEngstate) {
    return pEngstate->__engstateIsPresent__(pGpu, pEngstate);
}

NV_STATUS engstateConstructBase_IMPL(struct OBJENGSTATE *arg0, struct OBJGPU *arg1, ENGDESCRIPTOR arg2);
#ifdef __nvoc_eng_state_h_disabled
static inline NV_STATUS engstateConstructBase(struct OBJENGSTATE *arg0, struct OBJGPU *arg1, ENGDESCRIPTOR arg2) {
    NV_ASSERT_FAILED_PRECOMP("OBJENGSTATE was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_eng_state_h_disabled
#define engstateConstructBase(arg0, arg1, arg2) engstateConstructBase_IMPL(arg0, arg1, arg2)
#endif //__nvoc_eng_state_h_disabled

void engstateLogStateTransitionPre_IMPL(struct OBJENGSTATE *arg0, ENGSTATE_STATE arg1, ENGSTATE_TRANSITION_DATA *arg2);
#ifdef __nvoc_eng_state_h_disabled
static inline void engstateLogStateTransitionPre(struct OBJENGSTATE *arg0, ENGSTATE_STATE arg1, ENGSTATE_TRANSITION_DATA *arg2) {
    NV_ASSERT_FAILED_PRECOMP("OBJENGSTATE was disabled!");
}
#else //__nvoc_eng_state_h_disabled
#define engstateLogStateTransitionPre(arg0, arg1, arg2) engstateLogStateTransitionPre_IMPL(arg0, arg1, arg2)
#endif //__nvoc_eng_state_h_disabled

void engstateLogStateTransitionPost_IMPL(struct OBJENGSTATE *arg0, ENGSTATE_STATE arg1, ENGSTATE_TRANSITION_DATA *arg2);
#ifdef __nvoc_eng_state_h_disabled
static inline void engstateLogStateTransitionPost(struct OBJENGSTATE *arg0, ENGSTATE_STATE arg1, ENGSTATE_TRANSITION_DATA *arg2) {
    NV_ASSERT_FAILED_PRECOMP("OBJENGSTATE was disabled!");
}
#else //__nvoc_eng_state_h_disabled
#define engstateLogStateTransitionPost(arg0, arg1, arg2) engstateLogStateTransitionPost_IMPL(arg0, arg1, arg2)
#endif //__nvoc_eng_state_h_disabled

const char *engstateGetName_IMPL(struct OBJENGSTATE *arg0);
#ifdef __nvoc_eng_state_h_disabled
static inline const char *engstateGetName(struct OBJENGSTATE *arg0) {
    NV_ASSERT_FAILED_PRECOMP("OBJENGSTATE was disabled!");
    return NULL;
}
#else //__nvoc_eng_state_h_disabled
#define engstateGetName(arg0) engstateGetName_IMPL(arg0)
#endif //__nvoc_eng_state_h_disabled

void engstateDestruct_IMPL(POBJENGSTATE pEngstate);
#define __nvoc_engstateDestruct(pEngstate) engstateDestruct_IMPL(pEngstate)
NV_STATUS engstateStatePreInit_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate);
#ifdef __nvoc_eng_state_h_disabled
static inline NV_STATUS engstateStatePreInit(POBJGPU pGpu, POBJENGSTATE pEngstate) {
    NV_ASSERT_FAILED_PRECOMP("OBJENGSTATE was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_eng_state_h_disabled
#define engstateStatePreInit(pGpu, pEngstate) engstateStatePreInit_IMPL(pGpu, pEngstate)
#endif //__nvoc_eng_state_h_disabled

NV_STATUS engstateStateInit_IMPL(POBJGPU pGpu, POBJENGSTATE pEngstate);
#ifdef __nvoc_eng_state_h_disabled
static inline NV_STATUS engstateStateInit(POBJGPU pGpu, POBJENGSTATE pEngstate) {
    NV_ASSERT_FAILED_PRECOMP("OBJENGSTATE was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_eng_state_h_disabled
#define engstateStateInit(pGpu, pEngstate) engstateStateInit_IMPL(pGpu, pEngstate)
#endif //__nvoc_eng_state_h_disabled

ENGDESCRIPTOR engstateGetDescriptor_IMPL(POBJENGSTATE pEngstate);
#ifdef __nvoc_eng_state_h_disabled
static inline ENGDESCRIPTOR engstateGetDescriptor(POBJENGSTATE pEngstate) {
    NV_ASSERT_FAILED_PRECOMP("OBJENGSTATE was disabled!");
    ENGDESCRIPTOR ret;
    portMemSet(&ret, 0, sizeof(ENGDESCRIPTOR));
    return ret;
}
#else //__nvoc_eng_state_h_disabled
#define engstateGetDescriptor(pEngstate) engstateGetDescriptor_IMPL(pEngstate)
#endif //__nvoc_eng_state_h_disabled

struct OBJFIFO *engstateGetFifo_IMPL(POBJENGSTATE pEngstate);
#ifdef __nvoc_eng_state_h_disabled
static inline struct OBJFIFO *engstateGetFifo(POBJENGSTATE pEngstate) {
    NV_ASSERT_FAILED_PRECOMP("OBJENGSTATE was disabled!");
    return NULL;
}
#else //__nvoc_eng_state_h_disabled
#define engstateGetFifo(pEngstate) engstateGetFifo_IMPL(pEngstate)
#endif //__nvoc_eng_state_h_disabled

#undef PRIVATE_FIELD


#endif // _ENG_STATE_H_

#ifdef __cplusplus
} // extern "C"
#endif
#endif // _G_ENG_STATE_NVOC_H_
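The OBJENGSTATE interface above models an engine lifecycle that walks ENGSTATE_STATE in order (PRE_INIT, INIT, PRE_LOAD, LOAD, POST_LOAD, then the unload mirror), recording per-state statistics along the way. A standalone sketch of that progression, with invented `Engine`/`engineStep` names and none of the driver's locking or error handling:

#include <stdio.h>

/* Mirrors the ordered states above; the values are illustrative, not ABI. */
typedef enum {
    STATE_CONSTRUCT, STATE_PRE_INIT, STATE_INIT,
    STATE_PRE_LOAD, STATE_LOAD, STATE_POST_LOAD,
    STATE_PRE_UNLOAD, STATE_UNLOAD, STATE_POST_UNLOAD,
    STATE_DESTROY, STATE_COUNT
} EngineState;

typedef struct {
    EngineState currentState;
    int transitions[STATE_COUNT]; /* per-state stats, like ENGSTATE_STATS */
} Engine;

/* Advance one lifecycle step and record it, the way the engstateState*
 * dispatchers are invoked in sequence by the GPU state machine. */
static void engineStep(Engine *e, EngineState next) {
    e->transitions[next]++;
    e->currentState = next;
    printf("transition -> %d\n", (int)next);
}

int main(void) {
    Engine eng = { STATE_CONSTRUCT, {0} };
    EngineState boot[] = { STATE_PRE_INIT, STATE_INIT,
                           STATE_PRE_LOAD, STATE_LOAD, STATE_POST_LOAD };
    for (unsigned i = 0; i < sizeof boot / sizeof boot[0]; i++)
        engineStep(&eng, boot[i]);
    return 0;
}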
@@ -1,379 +0,0 @@
#define NVOC_EVENT_BUFFER_H_PRIVATE_ACCESS_ALLOWED
#include "nvoc/runtime.h"
#include "nvoc/rtti.h"
#include "nvtypes.h"
#include "nvport/nvport.h"
#include "nvport/inline/util_valist.h"
#include "utils/nvassert.h"
#include "g_event_buffer_nvoc.h"

#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0x63502b = 1;
#endif

extern const struct NVOC_CLASS_DEF __nvoc_class_def_EventBuffer;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource;

void __nvoc_init_EventBuffer(EventBuffer*);
void __nvoc_init_funcTable_EventBuffer(EventBuffer*);
NV_STATUS __nvoc_ctor_EventBuffer(EventBuffer*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams);
void __nvoc_init_dataField_EventBuffer(EventBuffer*);
void __nvoc_dtor_EventBuffer(EventBuffer*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_EventBuffer;

static const struct NVOC_RTTI __nvoc_rtti_EventBuffer_EventBuffer = {
    /*pClassDef=*/ &__nvoc_class_def_EventBuffer,
    /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_EventBuffer,
    /*offset=*/ 0,
};

static const struct NVOC_RTTI __nvoc_rtti_EventBuffer_Object = {
    /*pClassDef=*/ &__nvoc_class_def_Object,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(EventBuffer, __nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object),
};

static const struct NVOC_RTTI __nvoc_rtti_EventBuffer_RsResource = {
    /*pClassDef=*/ &__nvoc_class_def_RsResource,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(EventBuffer, __nvoc_base_RmResource.__nvoc_base_RsResource),
};

static const struct NVOC_RTTI __nvoc_rtti_EventBuffer_RmResourceCommon = {
    /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(EventBuffer, __nvoc_base_RmResource.__nvoc_base_RmResourceCommon),
};

static const struct NVOC_RTTI __nvoc_rtti_EventBuffer_RmResource = {
    /*pClassDef=*/ &__nvoc_class_def_RmResource,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(EventBuffer, __nvoc_base_RmResource),
};

static const struct NVOC_CASTINFO __nvoc_castinfo_EventBuffer = {
    /*numRelatives=*/ 5,
    /*relatives=*/ {
        &__nvoc_rtti_EventBuffer_EventBuffer,
        &__nvoc_rtti_EventBuffer_RmResource,
        &__nvoc_rtti_EventBuffer_RmResourceCommon,
        &__nvoc_rtti_EventBuffer_RsResource,
        &__nvoc_rtti_EventBuffer_Object,
    },
};

const struct NVOC_CLASS_DEF __nvoc_class_def_EventBuffer =
{
    /*classInfo=*/ {
        /*size=*/ sizeof(EventBuffer),
        /*classId=*/ classId(EventBuffer),
        /*providerId=*/ &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
        /*name=*/ "EventBuffer",
#endif
    },
    /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_EventBuffer,
    /*pCastInfo=*/ &__nvoc_castinfo_EventBuffer,
    /*pExportInfo=*/ &__nvoc_export_info_EventBuffer
};

static NvBool __nvoc_thunk_RmResource_eventbufferShareCallback(struct EventBuffer *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
    return rmresShareCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_EventBuffer_RmResource.offset), pInvokingClient, pParentRef, pSharePolicy);
}

static NV_STATUS __nvoc_thunk_RmResource_eventbufferCheckMemInterUnmap(struct EventBuffer *pRmResource, NvBool bSubdeviceHandleProvided) {
    return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_EventBuffer_RmResource.offset), bSubdeviceHandleProvided);
}

static NV_STATUS __nvoc_thunk_RsResource_eventbufferControl(struct EventBuffer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return resControl((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_EventBuffer_RsResource.offset), pCallContext, pParams);
}

static NV_STATUS __nvoc_thunk_RmResource_eventbufferGetMemInterMapParams(struct EventBuffer *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
    return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_EventBuffer_RmResource.offset), pParams);
}

static NV_STATUS __nvoc_thunk_RmResource_eventbufferGetMemoryMappingDescriptor(struct EventBuffer *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
    return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_EventBuffer_RmResource.offset), ppMemDesc);
}

static NvU32 __nvoc_thunk_RsResource_eventbufferGetRefCount(struct EventBuffer *pResource) {
    return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_EventBuffer_RsResource.offset));
}

static NV_STATUS __nvoc_thunk_RsResource_eventbufferControlFilter(struct EventBuffer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_EventBuffer_RsResource.offset), pCallContext, pParams);
}

static void __nvoc_thunk_RsResource_eventbufferAddAdditionalDependants(struct RsClient *pClient, struct EventBuffer *pResource, RsResourceRef *pReference) {
    resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_EventBuffer_RsResource.offset), pReference);
}

static NV_STATUS __nvoc_thunk_RsResource_eventbufferUnmap(struct EventBuffer *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) {
    return resUnmap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_EventBuffer_RsResource.offset), pCallContext, pCpuMapping);
}

static NV_STATUS __nvoc_thunk_RmResource_eventbufferControl_Prologue(struct EventBuffer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_EventBuffer_RmResource.offset), pCallContext, pParams);
}

static NvBool __nvoc_thunk_RsResource_eventbufferCanCopy(struct EventBuffer *pResource) {
    return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_EventBuffer_RsResource.offset));
}

static NV_STATUS __nvoc_thunk_RsResource_eventbufferMapTo(struct EventBuffer *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
    return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_EventBuffer_RsResource.offset), pParams);
}

static void __nvoc_thunk_RsResource_eventbufferPreDestruct(struct EventBuffer *pResource) {
    resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_EventBuffer_RsResource.offset));
}

static NV_STATUS __nvoc_thunk_RsResource_eventbufferUnmapFrom(struct EventBuffer *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
    return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_EventBuffer_RsResource.offset), pParams);
}

static void __nvoc_thunk_RmResource_eventbufferControl_Epilogue(struct EventBuffer *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_EventBuffer_RmResource.offset), pCallContext, pParams);
}

static NV_STATUS __nvoc_thunk_RsResource_eventbufferControlLookup(struct EventBuffer *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) {
    return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_EventBuffer_RsResource.offset), pParams, ppEntry);
}

static NV_STATUS __nvoc_thunk_RsResource_eventbufferMap(struct EventBuffer *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) {
    return resMap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_EventBuffer_RsResource.offset), pCallContext, pParams, pCpuMapping);
}

static NvBool __nvoc_thunk_RmResource_eventbufferAccessCallback(struct EventBuffer *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
    return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_EventBuffer_RmResource.offset), pInvokingClient, pAllocParams, accessRight);
}

#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG)
#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0)
#endif

static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_EventBuffer[] =
{
    { /* [0] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
        /*pFunc=*/ (void (*)(void)) NULL,
#else
        /*pFunc=*/ (void (*)(void)) eventbuffertBufferCtrlCmdEnableEvent_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
        /*flags=*/ 0x11u,
        /*accessRight=*/0x0u,
        /*methodId=*/ 0x90cd0101u,
        /*paramSize=*/ sizeof(NV_EVENT_BUFFER_CTRL_CMD_ENABLE_EVENTS_PARAMS),
        /*pClassInfo=*/ &(__nvoc_class_def_EventBuffer.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
        /*func=*/ "eventbuffertBufferCtrlCmdEnableEvent"
#endif
    },
    { /* [1] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
        /*pFunc=*/ (void (*)(void)) NULL,
#else
        /*pFunc=*/ (void (*)(void)) eventbuffertBufferCtrlCmdUpdateGet_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
        /*flags=*/ 0x11u,
        /*accessRight=*/0x0u,
        /*methodId=*/ 0x90cd0102u,
        /*paramSize=*/ sizeof(NV_EVENT_BUFFER_CTRL_CMD_UPDATE_GET_PARAMS),
        /*pClassInfo=*/ &(__nvoc_class_def_EventBuffer.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
        /*func=*/ "eventbuffertBufferCtrlCmdUpdateGet"
#endif
    },
    { /* [2] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
        /*pFunc=*/ (void (*)(void)) NULL,
#else
        /*pFunc=*/ (void (*)(void)) eventbuffertBufferCtrlCmdFlush_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
        /*flags=*/ 0x10u,
        /*accessRight=*/0x0u,
        /*methodId=*/ 0x90cd0104u,
        /*paramSize=*/ 0,
        /*pClassInfo=*/ &(__nvoc_class_def_EventBuffer.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
        /*func=*/ "eventbuffertBufferCtrlCmdFlush"
#endif
    },
    { /* [3] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
        /*pFunc=*/ (void (*)(void)) NULL,
#else
        /*pFunc=*/ (void (*)(void)) eventbuffertBufferCtrlCmdPostTelemetryEvent_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
        /*flags=*/ 0x10u,
        /*accessRight=*/0x0u,
        /*methodId=*/ 0x90cd0105u,
        /*paramSize=*/ sizeof(NV_EVENT_BUFFER_CTRL_CMD_POST_TELEMETRY_EVENT_PARAMS),
        /*pClassInfo=*/ &(__nvoc_class_def_EventBuffer.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
        /*func=*/ "eventbuffertBufferCtrlCmdPostTelemetryEvent"
#endif
    },

};

const struct NVOC_EXPORT_INFO __nvoc_export_info_EventBuffer =
{
    /*numEntries=*/ 4,
    /*pExportEntries=*/ __nvoc_exported_method_def_EventBuffer
};
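Each NVOC_EXPORTED_METHOD_DEF entry above keys an RM control handler by its methodId (here 0x90cdXXXX values, which appear to combine the class number with a command index) together with its flags and parameter size, and `eventbufferControlLookup` resolves an incoming command by scanning such a table. A standalone sketch of that lookup-by-id dispatch, with invented names (`MethodDef`, `lookupMethod`) and only the id/size/handler fields of the real structure:

#include <stdio.h>
#include <stddef.h>

typedef int (*CtrlFunc)(void *pParams);

/* Invented miniature of NVOC_EXPORTED_METHOD_DEF: id -> handler + param size. */
typedef struct {
    unsigned methodId;
    size_t paramSize;
    CtrlFunc pFunc;
} MethodDef;

static int enableEvents(void *p) { (void)p; return 0; }
static int updateGet(void *p)    { (void)p; return 0; }

static const MethodDef table[] = {
    { 0x90cd0101u, 16, enableEvents },
    { 0x90cd0102u, 8,  updateGet    },
};

/* Linear scan, the way a control lookup walks pExportEntries. */
static const MethodDef *lookupMethod(unsigned methodId) {
    for (size_t i = 0; i < sizeof table / sizeof table[0]; i++)
        if (table[i].methodId == methodId)
            return &table[i];
    return NULL; /* unknown control command */
}

int main(void) {
    const MethodDef *m = lookupMethod(0x90cd0101u);
    printf("found=%d\n", m != NULL && m->pFunc(NULL) == 0);
    return 0;
}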
void __nvoc_dtor_RmResource(RmResource*);
void __nvoc_dtor_EventBuffer(EventBuffer *pThis) {
    __nvoc_eventbufferDestruct(pThis);
    __nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource);
    PORT_UNREFERENCED_VARIABLE(pThis);
}

void __nvoc_init_dataField_EventBuffer(EventBuffer *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);
}

NV_STATUS __nvoc_ctor_RmResource(RmResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
NV_STATUS __nvoc_ctor_EventBuffer(EventBuffer *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
    NV_STATUS status = NV_OK;
    status = __nvoc_ctor_RmResource(&pThis->__nvoc_base_RmResource, arg_pCallContext, arg_pParams);
    if (status != NV_OK) goto __nvoc_ctor_EventBuffer_fail_RmResource;
    __nvoc_init_dataField_EventBuffer(pThis);

    status = __nvoc_eventbufferConstruct(pThis, arg_pCallContext, arg_pParams);
    if (status != NV_OK) goto __nvoc_ctor_EventBuffer_fail__init;
    goto __nvoc_ctor_EventBuffer_exit; // Success

__nvoc_ctor_EventBuffer_fail__init:
    __nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource);
__nvoc_ctor_EventBuffer_fail_RmResource:
__nvoc_ctor_EventBuffer_exit:

    return status;
}

static void __nvoc_init_funcTable_EventBuffer_1(EventBuffer *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);

#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
    pThis->__eventbuffertBufferCtrlCmdEnableEvent__ = &eventbuffertBufferCtrlCmdEnableEvent_IMPL;
#endif

#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x11u)
    pThis->__eventbuffertBufferCtrlCmdUpdateGet__ = &eventbuffertBufferCtrlCmdUpdateGet_IMPL;
#endif

#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
    pThis->__eventbuffertBufferCtrlCmdFlush__ = &eventbuffertBufferCtrlCmdFlush_IMPL;
#endif

#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x10u)
    pThis->__eventbuffertBufferCtrlCmdPostTelemetryEvent__ = &eventbuffertBufferCtrlCmdPostTelemetryEvent_IMPL;
#endif

    pThis->__eventbufferShareCallback__ = &__nvoc_thunk_RmResource_eventbufferShareCallback;

    pThis->__eventbufferCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_eventbufferCheckMemInterUnmap;

    pThis->__eventbufferControl__ = &__nvoc_thunk_RsResource_eventbufferControl;

    pThis->__eventbufferGetMemInterMapParams__ = &__nvoc_thunk_RmResource_eventbufferGetMemInterMapParams;

    pThis->__eventbufferGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_eventbufferGetMemoryMappingDescriptor;

    pThis->__eventbufferGetRefCount__ = &__nvoc_thunk_RsResource_eventbufferGetRefCount;

    pThis->__eventbufferControlFilter__ = &__nvoc_thunk_RsResource_eventbufferControlFilter;

    pThis->__eventbufferAddAdditionalDependants__ = &__nvoc_thunk_RsResource_eventbufferAddAdditionalDependants;

    pThis->__eventbufferUnmap__ = &__nvoc_thunk_RsResource_eventbufferUnmap;

    pThis->__eventbufferControl_Prologue__ = &__nvoc_thunk_RmResource_eventbufferControl_Prologue;

    pThis->__eventbufferCanCopy__ = &__nvoc_thunk_RsResource_eventbufferCanCopy;

    pThis->__eventbufferMapTo__ = &__nvoc_thunk_RsResource_eventbufferMapTo;

    pThis->__eventbufferPreDestruct__ = &__nvoc_thunk_RsResource_eventbufferPreDestruct;

    pThis->__eventbufferUnmapFrom__ = &__nvoc_thunk_RsResource_eventbufferUnmapFrom;

    pThis->__eventbufferControl_Epilogue__ = &__nvoc_thunk_RmResource_eventbufferControl_Epilogue;

    pThis->__eventbufferControlLookup__ = &__nvoc_thunk_RsResource_eventbufferControlLookup;

    pThis->__eventbufferMap__ = &__nvoc_thunk_RsResource_eventbufferMap;

    pThis->__eventbufferAccessCallback__ = &__nvoc_thunk_RmResource_eventbufferAccessCallback;
}

void __nvoc_init_funcTable_EventBuffer(EventBuffer *pThis) {
    __nvoc_init_funcTable_EventBuffer_1(pThis);
}

void __nvoc_init_RmResource(RmResource*);
void __nvoc_init_EventBuffer(EventBuffer *pThis) {
    pThis->__nvoc_pbase_EventBuffer = pThis;
    pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object;
|
||||
pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource;
|
||||
pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_RmResource.__nvoc_base_RmResourceCommon;
|
||||
pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_RmResource;
|
||||
__nvoc_init_RmResource(&pThis->__nvoc_base_RmResource);
|
||||
__nvoc_init_funcTable_EventBuffer(pThis);
|
||||
}
|
||||
|
||||
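// Object-creation flow (as generated by NVOC): allocate and zero the object,
// install its RTTI, attach it to the parent's child list unless the parent is
// HAL-spec-only, then run the vtable init and the constructor chain. On
// constructor failure the partially built object is freed without re-running
// destructors, since the failing constructor already unwound its bases.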
NV_STATUS __nvoc_objCreate_EventBuffer(EventBuffer **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
    NV_STATUS status;
    Object *pParentObj;
    EventBuffer *pThis;

    pThis = portMemAllocNonPaged(sizeof(EventBuffer));
    if (pThis == NULL) return NV_ERR_NO_MEMORY;

    portMemSet(pThis, 0, sizeof(EventBuffer));

    __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_EventBuffer);

    if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
    {
        pParentObj = dynamicCast(pParent, Object);
        objAddChild(pParentObj, &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object);
    }
    else
    {
        pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL;
    }

    __nvoc_init_EventBuffer(pThis);
    status = __nvoc_ctor_EventBuffer(pThis, arg_pCallContext, arg_pParams);
    if (status != NV_OK) goto __nvoc_objCreate_EventBuffer_cleanup;

    *ppThis = pThis;
    return NV_OK;

__nvoc_objCreate_EventBuffer_cleanup:
    // do not call destructors here since the constructor already called them
    portMemFree(pThis);
    return status;
}

NV_STATUS __nvoc_objCreateDynamic_EventBuffer(EventBuffer **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
    NV_STATUS status;
    struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *);
    struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *);

    status = __nvoc_objCreate_EventBuffer(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams);

    return status;
}

@@ -1,692 +0,0 @@
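// Deleted file (presumably g_event_nvoc.c, given its include of "g_event_nvoc.h"):
// NVOC-generated boilerplate for the NotifShare, Event, INotifier, and Notifier
// classes -- per-class RTTI and cast tables, class definitions, constructor and
// destructor chains, and the per-instance function tables wired up below.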
#define NVOC_EVENT_H_PRIVATE_ACCESS_ALLOWED
#include "nvoc/runtime.h"
#include "nvoc/rtti.h"
#include "nvtypes.h"
#include "nvport/nvport.h"
#include "nvport/inline/util_valist.h"
#include "utils/nvassert.h"
#include "g_event_nvoc.h"

#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0xd5f150 = 1;
#endif

extern const struct NVOC_CLASS_DEF __nvoc_class_def_NotifShare;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsShared;

void __nvoc_init_NotifShare(NotifShare*);
void __nvoc_init_funcTable_NotifShare(NotifShare*);
NV_STATUS __nvoc_ctor_NotifShare(NotifShare*);
void __nvoc_init_dataField_NotifShare(NotifShare*);
void __nvoc_dtor_NotifShare(NotifShare*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_NotifShare;

static const struct NVOC_RTTI __nvoc_rtti_NotifShare_NotifShare = {
    /*pClassDef=*/          &__nvoc_class_def_NotifShare,
    /*dtor=*/               (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_NotifShare,
    /*offset=*/             0,
};

static const struct NVOC_RTTI __nvoc_rtti_NotifShare_Object = {
    /*pClassDef=*/          &__nvoc_class_def_Object,
    /*dtor=*/               &__nvoc_destructFromBase,
    /*offset=*/             NV_OFFSETOF(NotifShare, __nvoc_base_RsShared.__nvoc_base_Object),
};

static const struct NVOC_RTTI __nvoc_rtti_NotifShare_RsShared = {
    /*pClassDef=*/          &__nvoc_class_def_RsShared,
    /*dtor=*/               &__nvoc_destructFromBase,
    /*offset=*/             NV_OFFSETOF(NotifShare, __nvoc_base_RsShared),
};

static const struct NVOC_CASTINFO __nvoc_castinfo_NotifShare = {
    /*numRelatives=*/       3,
    /*relatives=*/ {
        &__nvoc_rtti_NotifShare_NotifShare,
        &__nvoc_rtti_NotifShare_RsShared,
        &__nvoc_rtti_NotifShare_Object,
    },
};

const struct NVOC_CLASS_DEF __nvoc_class_def_NotifShare =
{
    /*classInfo=*/ {
        /*size=*/               sizeof(NotifShare),
        /*classId=*/            classId(NotifShare),
        /*providerId=*/         &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
        /*name=*/               "NotifShare",
#endif
    },
    /*objCreatefn=*/        (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_NotifShare,
    /*pCastInfo=*/          &__nvoc_castinfo_NotifShare,
    /*pExportInfo=*/        &__nvoc_export_info_NotifShare
};

const struct NVOC_EXPORT_INFO __nvoc_export_info_NotifShare =
{
    /*numEntries=*/     0,
    /*pExportEntries=*/ 0
};

void __nvoc_dtor_RsShared(RsShared*);
void __nvoc_dtor_NotifShare(NotifShare *pThis) {
    __nvoc_shrnotifDestruct(pThis);
    __nvoc_dtor_RsShared(&pThis->__nvoc_base_RsShared);
    PORT_UNREFERENCED_VARIABLE(pThis);
}

void __nvoc_init_dataField_NotifShare(NotifShare *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);
}

NV_STATUS __nvoc_ctor_RsShared(RsShared* );
NV_STATUS __nvoc_ctor_NotifShare(NotifShare *pThis) {
    NV_STATUS status = NV_OK;
    status = __nvoc_ctor_RsShared(&pThis->__nvoc_base_RsShared);
    if (status != NV_OK) goto __nvoc_ctor_NotifShare_fail_RsShared;
    __nvoc_init_dataField_NotifShare(pThis);

    status = __nvoc_shrnotifConstruct(pThis);
    if (status != NV_OK) goto __nvoc_ctor_NotifShare_fail__init;
    goto __nvoc_ctor_NotifShare_exit; // Success

__nvoc_ctor_NotifShare_fail__init:
    __nvoc_dtor_RsShared(&pThis->__nvoc_base_RsShared);
__nvoc_ctor_NotifShare_fail_RsShared:
__nvoc_ctor_NotifShare_exit:

    return status;
}

static void __nvoc_init_funcTable_NotifShare_1(NotifShare *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);
}

void __nvoc_init_funcTable_NotifShare(NotifShare *pThis) {
    __nvoc_init_funcTable_NotifShare_1(pThis);
}

void __nvoc_init_RsShared(RsShared*);
void __nvoc_init_NotifShare(NotifShare *pThis) {
    pThis->__nvoc_pbase_NotifShare = pThis;
    pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RsShared.__nvoc_base_Object;
    pThis->__nvoc_pbase_RsShared = &pThis->__nvoc_base_RsShared;
    __nvoc_init_RsShared(&pThis->__nvoc_base_RsShared);
    __nvoc_init_funcTable_NotifShare(pThis);
}

NV_STATUS __nvoc_objCreate_NotifShare(NotifShare **ppThis, Dynamic *pParent, NvU32 createFlags) {
    NV_STATUS status;
    Object *pParentObj;
    NotifShare *pThis;

    pThis = portMemAllocNonPaged(sizeof(NotifShare));
    if (pThis == NULL) return NV_ERR_NO_MEMORY;

    portMemSet(pThis, 0, sizeof(NotifShare));

    __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_NotifShare);

    if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
    {
        pParentObj = dynamicCast(pParent, Object);
        objAddChild(pParentObj, &pThis->__nvoc_base_RsShared.__nvoc_base_Object);
    }
    else
    {
        pThis->__nvoc_base_RsShared.__nvoc_base_Object.pParent = NULL;
    }

    __nvoc_init_NotifShare(pThis);
    status = __nvoc_ctor_NotifShare(pThis);
    if (status != NV_OK) goto __nvoc_objCreate_NotifShare_cleanup;

    *ppThis = pThis;
    return NV_OK;

__nvoc_objCreate_NotifShare_cleanup:
    // do not call destructors here since the constructor already called them
    portMemFree(pThis);
    return status;
}

NV_STATUS __nvoc_objCreateDynamic_NotifShare(NotifShare **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
    NV_STATUS status;

    status = __nvoc_objCreate_NotifShare(ppThis, pParent, createFlags);

    return status;
}

#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0xa4ecfc = 1;
#endif

extern const struct NVOC_CLASS_DEF __nvoc_class_def_Event;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource;

void __nvoc_init_Event(Event*);
void __nvoc_init_funcTable_Event(Event*);
NV_STATUS __nvoc_ctor_Event(Event*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams);
void __nvoc_init_dataField_Event(Event*);
void __nvoc_dtor_Event(Event*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_Event;

static const struct NVOC_RTTI __nvoc_rtti_Event_Event = {
    /*pClassDef=*/          &__nvoc_class_def_Event,
    /*dtor=*/               (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_Event,
    /*offset=*/             0,
};

static const struct NVOC_RTTI __nvoc_rtti_Event_Object = {
    /*pClassDef=*/          &__nvoc_class_def_Object,
    /*dtor=*/               &__nvoc_destructFromBase,
    /*offset=*/             NV_OFFSETOF(Event, __nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object),
};

static const struct NVOC_RTTI __nvoc_rtti_Event_RsResource = {
    /*pClassDef=*/          &__nvoc_class_def_RsResource,
    /*dtor=*/               &__nvoc_destructFromBase,
    /*offset=*/             NV_OFFSETOF(Event, __nvoc_base_RmResource.__nvoc_base_RsResource),
};

static const struct NVOC_RTTI __nvoc_rtti_Event_RmResourceCommon = {
    /*pClassDef=*/          &__nvoc_class_def_RmResourceCommon,
    /*dtor=*/               &__nvoc_destructFromBase,
    /*offset=*/             NV_OFFSETOF(Event, __nvoc_base_RmResource.__nvoc_base_RmResourceCommon),
};

static const struct NVOC_RTTI __nvoc_rtti_Event_RmResource = {
    /*pClassDef=*/          &__nvoc_class_def_RmResource,
    /*dtor=*/               &__nvoc_destructFromBase,
    /*offset=*/             NV_OFFSETOF(Event, __nvoc_base_RmResource),
};

static const struct NVOC_CASTINFO __nvoc_castinfo_Event = {
    /*numRelatives=*/       5,
    /*relatives=*/ {
        &__nvoc_rtti_Event_Event,
        &__nvoc_rtti_Event_RmResource,
        &__nvoc_rtti_Event_RmResourceCommon,
        &__nvoc_rtti_Event_RsResource,
        &__nvoc_rtti_Event_Object,
    },
};

const struct NVOC_CLASS_DEF __nvoc_class_def_Event =
{
    /*classInfo=*/ {
        /*size=*/               sizeof(Event),
        /*classId=*/            classId(Event),
        /*providerId=*/         &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
        /*name=*/               "Event",
#endif
    },
    /*objCreatefn=*/        (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_Event,
    /*pCastInfo=*/          &__nvoc_castinfo_Event,
    /*pExportInfo=*/        &__nvoc_export_info_Event
};

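// The thunks below let base-class implementations (res*/rmres*) service calls
// made on an Event: each one rebases the object pointer by the offset recorded
// in the corresponding __nvoc_rtti_Event_* entry before forwarding the call.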
static NvBool __nvoc_thunk_RmResource_eventShareCallback(struct Event *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
    return rmresShareCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_Event_RmResource.offset), pInvokingClient, pParentRef, pSharePolicy);
}

static NV_STATUS __nvoc_thunk_RmResource_eventCheckMemInterUnmap(struct Event *pRmResource, NvBool bSubdeviceHandleProvided) {
    return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_Event_RmResource.offset), bSubdeviceHandleProvided);
}

static NV_STATUS __nvoc_thunk_RsResource_eventControl(struct Event *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return resControl((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Event_RsResource.offset), pCallContext, pParams);
}

static NV_STATUS __nvoc_thunk_RmResource_eventGetMemInterMapParams(struct Event *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
    return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_Event_RmResource.offset), pParams);
}

static NV_STATUS __nvoc_thunk_RmResource_eventGetMemoryMappingDescriptor(struct Event *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
    return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_Event_RmResource.offset), ppMemDesc);
}

static NvU32 __nvoc_thunk_RsResource_eventGetRefCount(struct Event *pResource) {
    return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Event_RsResource.offset));
}

static NV_STATUS __nvoc_thunk_RsResource_eventControlFilter(struct Event *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Event_RsResource.offset), pCallContext, pParams);
}

static void __nvoc_thunk_RsResource_eventAddAdditionalDependants(struct RsClient *pClient, struct Event *pResource, RsResourceRef *pReference) {
    resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Event_RsResource.offset), pReference);
}

static NV_STATUS __nvoc_thunk_RsResource_eventUnmap(struct Event *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) {
    return resUnmap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Event_RsResource.offset), pCallContext, pCpuMapping);
}

static NV_STATUS __nvoc_thunk_RmResource_eventControl_Prologue(struct Event *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_Event_RmResource.offset), pCallContext, pParams);
}

static NvBool __nvoc_thunk_RsResource_eventCanCopy(struct Event *pResource) {
    return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Event_RsResource.offset));
}

static NV_STATUS __nvoc_thunk_RsResource_eventMapTo(struct Event *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
    return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Event_RsResource.offset), pParams);
}

static void __nvoc_thunk_RsResource_eventPreDestruct(struct Event *pResource) {
    resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Event_RsResource.offset));
}

static NV_STATUS __nvoc_thunk_RsResource_eventUnmapFrom(struct Event *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
    return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Event_RsResource.offset), pParams);
}

static void __nvoc_thunk_RmResource_eventControl_Epilogue(struct Event *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_Event_RmResource.offset), pCallContext, pParams);
}

static NV_STATUS __nvoc_thunk_RsResource_eventControlLookup(struct Event *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) {
    return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Event_RsResource.offset), pParams, ppEntry);
}

static NV_STATUS __nvoc_thunk_RsResource_eventMap(struct Event *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) {
    return resMap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Event_RsResource.offset), pCallContext, pParams, pCpuMapping);
}

static NvBool __nvoc_thunk_RmResource_eventAccessCallback(struct Event *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
    return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_Event_RmResource.offset), pInvokingClient, pAllocParams, accessRight);
}

const struct NVOC_EXPORT_INFO __nvoc_export_info_Event =
{
    /*numEntries=*/     0,
    /*pExportEntries=*/ 0
};

void __nvoc_dtor_RmResource(RmResource*);
void __nvoc_dtor_Event(Event *pThis) {
    __nvoc_eventDestruct(pThis);
    __nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource);
    PORT_UNREFERENCED_VARIABLE(pThis);
}

void __nvoc_init_dataField_Event(Event *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);
}

NV_STATUS __nvoc_ctor_RmResource(RmResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
NV_STATUS __nvoc_ctor_Event(Event *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
    NV_STATUS status = NV_OK;
    status = __nvoc_ctor_RmResource(&pThis->__nvoc_base_RmResource, arg_pCallContext, arg_pParams);
    if (status != NV_OK) goto __nvoc_ctor_Event_fail_RmResource;
    __nvoc_init_dataField_Event(pThis);

    status = __nvoc_eventConstruct(pThis, arg_pCallContext, arg_pParams);
    if (status != NV_OK) goto __nvoc_ctor_Event_fail__init;
    goto __nvoc_ctor_Event_exit; // Success

__nvoc_ctor_Event_fail__init:
    __nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource);
__nvoc_ctor_Event_fail_RmResource:
__nvoc_ctor_Event_exit:

    return status;
}

static void __nvoc_init_funcTable_Event_1(Event *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);

    pThis->__eventShareCallback__ = &__nvoc_thunk_RmResource_eventShareCallback;

    pThis->__eventCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_eventCheckMemInterUnmap;

    pThis->__eventControl__ = &__nvoc_thunk_RsResource_eventControl;

    pThis->__eventGetMemInterMapParams__ = &__nvoc_thunk_RmResource_eventGetMemInterMapParams;

    pThis->__eventGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_eventGetMemoryMappingDescriptor;

    pThis->__eventGetRefCount__ = &__nvoc_thunk_RsResource_eventGetRefCount;

    pThis->__eventControlFilter__ = &__nvoc_thunk_RsResource_eventControlFilter;

    pThis->__eventAddAdditionalDependants__ = &__nvoc_thunk_RsResource_eventAddAdditionalDependants;

    pThis->__eventUnmap__ = &__nvoc_thunk_RsResource_eventUnmap;

    pThis->__eventControl_Prologue__ = &__nvoc_thunk_RmResource_eventControl_Prologue;

    pThis->__eventCanCopy__ = &__nvoc_thunk_RsResource_eventCanCopy;

    pThis->__eventMapTo__ = &__nvoc_thunk_RsResource_eventMapTo;

    pThis->__eventPreDestruct__ = &__nvoc_thunk_RsResource_eventPreDestruct;

    pThis->__eventUnmapFrom__ = &__nvoc_thunk_RsResource_eventUnmapFrom;

    pThis->__eventControl_Epilogue__ = &__nvoc_thunk_RmResource_eventControl_Epilogue;

    pThis->__eventControlLookup__ = &__nvoc_thunk_RsResource_eventControlLookup;

    pThis->__eventMap__ = &__nvoc_thunk_RsResource_eventMap;

    pThis->__eventAccessCallback__ = &__nvoc_thunk_RmResource_eventAccessCallback;
}

void __nvoc_init_funcTable_Event(Event *pThis) {
    __nvoc_init_funcTable_Event_1(pThis);
}

void __nvoc_init_RmResource(RmResource*);
void __nvoc_init_Event(Event *pThis) {
    pThis->__nvoc_pbase_Event = pThis;
    pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object;
    pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource;
    pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_RmResource.__nvoc_base_RmResourceCommon;
    pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_RmResource;
    __nvoc_init_RmResource(&pThis->__nvoc_base_RmResource);
    __nvoc_init_funcTable_Event(pThis);
}

NV_STATUS __nvoc_objCreate_Event(Event **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
    NV_STATUS status;
    Object *pParentObj;
    Event *pThis;

    pThis = portMemAllocNonPaged(sizeof(Event));
    if (pThis == NULL) return NV_ERR_NO_MEMORY;

    portMemSet(pThis, 0, sizeof(Event));

    __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_Event);

    if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
    {
        pParentObj = dynamicCast(pParent, Object);
        objAddChild(pParentObj, &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object);
    }
    else
    {
        pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL;
    }

    __nvoc_init_Event(pThis);
    status = __nvoc_ctor_Event(pThis, arg_pCallContext, arg_pParams);
    if (status != NV_OK) goto __nvoc_objCreate_Event_cleanup;

    *ppThis = pThis;
    return NV_OK;

__nvoc_objCreate_Event_cleanup:
    // do not call destructors here since the constructor already called them
    portMemFree(pThis);
    return status;
}

NV_STATUS __nvoc_objCreateDynamic_Event(Event **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
    NV_STATUS status;
    struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *);
    struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *);

    status = __nvoc_objCreate_Event(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams);

    return status;
}

#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0xf8f965 = 1;
#endif

extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier;

void __nvoc_init_INotifier(INotifier*);
void __nvoc_init_funcTable_INotifier(INotifier*);
NV_STATUS __nvoc_ctor_INotifier(INotifier*, struct CALL_CONTEXT * arg_pCallContext);
void __nvoc_init_dataField_INotifier(INotifier*);
void __nvoc_dtor_INotifier(INotifier*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_INotifier;

static const struct NVOC_RTTI __nvoc_rtti_INotifier_INotifier = {
    /*pClassDef=*/          &__nvoc_class_def_INotifier,
    /*dtor=*/               (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_INotifier,
    /*offset=*/             0,
};

static const struct NVOC_CASTINFO __nvoc_castinfo_INotifier = {
    /*numRelatives=*/       1,
    /*relatives=*/ {
        &__nvoc_rtti_INotifier_INotifier,
    },
};

// Not instantiable because it's not derived from class "Object"
// Not instantiable because it's an abstract class with following pure virtual functions:
//  inotifyGetNotificationListPtr
//  inotifySetNotificationShare
//  inotifyGetNotificationShare
//  inotifyUnregisterEvent
//  inotifyGetOrAllocNotifShare
const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier =
{
    /*classInfo=*/ {
        /*size=*/               sizeof(INotifier),
        /*classId=*/            classId(INotifier),
        /*providerId=*/         &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
        /*name=*/               "INotifier",
#endif
    },
    /*objCreatefn=*/        (NVOC_DYNAMIC_OBJ_CREATE) NULL,
    /*pCastInfo=*/          &__nvoc_castinfo_INotifier,
    /*pExportInfo=*/        &__nvoc_export_info_INotifier
};

const struct NVOC_EXPORT_INFO __nvoc_export_info_INotifier =
{
    /*numEntries=*/     0,
    /*pExportEntries=*/ 0
};

void __nvoc_dtor_INotifier(INotifier *pThis) {
    __nvoc_inotifyDestruct(pThis);
    PORT_UNREFERENCED_VARIABLE(pThis);
}

void __nvoc_init_dataField_INotifier(INotifier *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);
}

NV_STATUS __nvoc_ctor_INotifier(INotifier *pThis, struct CALL_CONTEXT * arg_pCallContext) {
    NV_STATUS status = NV_OK;
    __nvoc_init_dataField_INotifier(pThis);

    status = __nvoc_inotifyConstruct(pThis, arg_pCallContext);
    if (status != NV_OK) goto __nvoc_ctor_INotifier_fail__init;
    goto __nvoc_ctor_INotifier_exit; // Success

__nvoc_ctor_INotifier_fail__init:
__nvoc_ctor_INotifier_exit:

    return status;
}

static void __nvoc_init_funcTable_INotifier_1(INotifier *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);

    pThis->__inotifyGetNotificationListPtr__ = NULL;

    pThis->__inotifySetNotificationShare__ = NULL;

    pThis->__inotifyGetNotificationShare__ = NULL;

    pThis->__inotifyUnregisterEvent__ = NULL;

    pThis->__inotifyGetOrAllocNotifShare__ = NULL;
}

void __nvoc_init_funcTable_INotifier(INotifier *pThis) {
    __nvoc_init_funcTable_INotifier_1(pThis);
}

void __nvoc_init_INotifier(INotifier *pThis) {
    pThis->__nvoc_pbase_INotifier = pThis;
    __nvoc_init_funcTable_INotifier(pThis);
}

#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0xa8683b = 1;
#endif

extern const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier;

void __nvoc_init_Notifier(Notifier*);
void __nvoc_init_funcTable_Notifier(Notifier*);
NV_STATUS __nvoc_ctor_Notifier(Notifier*, struct CALL_CONTEXT * arg_pCallContext);
void __nvoc_init_dataField_Notifier(Notifier*);
void __nvoc_dtor_Notifier(Notifier*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_Notifier;

static const struct NVOC_RTTI __nvoc_rtti_Notifier_Notifier = {
    /*pClassDef=*/          &__nvoc_class_def_Notifier,
    /*dtor=*/               (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_Notifier,
    /*offset=*/             0,
};

static const struct NVOC_RTTI __nvoc_rtti_Notifier_INotifier = {
    /*pClassDef=*/          &__nvoc_class_def_INotifier,
    /*dtor=*/               &__nvoc_destructFromBase,
    /*offset=*/             NV_OFFSETOF(Notifier, __nvoc_base_INotifier),
};

static const struct NVOC_CASTINFO __nvoc_castinfo_Notifier = {
    /*numRelatives=*/       2,
    /*relatives=*/ {
        &__nvoc_rtti_Notifier_Notifier,
        &__nvoc_rtti_Notifier_INotifier,
    },
};

// Not instantiable because it's not derived from class "Object"
const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier =
{
    /*classInfo=*/ {
        /*size=*/               sizeof(Notifier),
        /*classId=*/            classId(Notifier),
        /*providerId=*/         &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
        /*name=*/               "Notifier",
#endif
    },
    /*objCreatefn=*/        (NVOC_DYNAMIC_OBJ_CREATE) NULL,
    /*pCastInfo=*/          &__nvoc_castinfo_Notifier,
    /*pExportInfo=*/        &__nvoc_export_info_Notifier
};

static PEVENTNOTIFICATION *__nvoc_thunk_Notifier_inotifyGetNotificationListPtr(struct INotifier *pNotifier) {
    return notifyGetNotificationListPtr((struct Notifier *)(((unsigned char *)pNotifier) - __nvoc_rtti_Notifier_INotifier.offset));
}

static struct NotifShare *__nvoc_thunk_Notifier_inotifyGetNotificationShare(struct INotifier *pNotifier) {
    return notifyGetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) - __nvoc_rtti_Notifier_INotifier.offset));
}

static void __nvoc_thunk_Notifier_inotifySetNotificationShare(struct INotifier *pNotifier, struct NotifShare *pNotifShare) {
    notifySetNotificationShare((struct Notifier *)(((unsigned char *)pNotifier) - __nvoc_rtti_Notifier_INotifier.offset), pNotifShare);
}

static NV_STATUS __nvoc_thunk_Notifier_inotifyUnregisterEvent(struct INotifier *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) {
    return notifyUnregisterEvent((struct Notifier *)(((unsigned char *)pNotifier) - __nvoc_rtti_Notifier_INotifier.offset), hNotifierClient, hNotifierResource, hEventClient, hEvent);
}

static NV_STATUS __nvoc_thunk_Notifier_inotifyGetOrAllocNotifShare(struct INotifier *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) {
    return notifyGetOrAllocNotifShare((struct Notifier *)(((unsigned char *)pNotifier) - __nvoc_rtti_Notifier_INotifier.offset), hNotifierClient, hNotifierResource, ppNotifShare);
}

const struct NVOC_EXPORT_INFO __nvoc_export_info_Notifier =
{
    /*numEntries=*/     0,
    /*pExportEntries=*/ 0
};

void __nvoc_dtor_INotifier(INotifier*);
void __nvoc_dtor_Notifier(Notifier *pThis) {
    __nvoc_notifyDestruct(pThis);
    __nvoc_dtor_INotifier(&pThis->__nvoc_base_INotifier);
    PORT_UNREFERENCED_VARIABLE(pThis);
}

void __nvoc_init_dataField_Notifier(Notifier *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);
}

NV_STATUS __nvoc_ctor_INotifier(INotifier* , struct CALL_CONTEXT *);
NV_STATUS __nvoc_ctor_Notifier(Notifier *pThis, struct CALL_CONTEXT * arg_pCallContext) {
    NV_STATUS status = NV_OK;
    status = __nvoc_ctor_INotifier(&pThis->__nvoc_base_INotifier, arg_pCallContext);
    if (status != NV_OK) goto __nvoc_ctor_Notifier_fail_INotifier;
    __nvoc_init_dataField_Notifier(pThis);

    status = __nvoc_notifyConstruct(pThis, arg_pCallContext);
    if (status != NV_OK) goto __nvoc_ctor_Notifier_fail__init;
    goto __nvoc_ctor_Notifier_exit; // Success

__nvoc_ctor_Notifier_fail__init:
    __nvoc_dtor_INotifier(&pThis->__nvoc_base_INotifier);
__nvoc_ctor_Notifier_fail_INotifier:
__nvoc_ctor_Notifier_exit:

    return status;
}

static void __nvoc_init_funcTable_Notifier_1(Notifier *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);

    pThis->__notifyGetNotificationListPtr__ = &notifyGetNotificationListPtr_IMPL;

    pThis->__notifyGetNotificationShare__ = &notifyGetNotificationShare_IMPL;

    pThis->__notifySetNotificationShare__ = &notifySetNotificationShare_IMPL;

    pThis->__notifyUnregisterEvent__ = &notifyUnregisterEvent_IMPL;

    pThis->__notifyGetOrAllocNotifShare__ = &notifyGetOrAllocNotifShare_IMPL;

    pThis->__nvoc_base_INotifier.__inotifyGetNotificationListPtr__ = &__nvoc_thunk_Notifier_inotifyGetNotificationListPtr;

    pThis->__nvoc_base_INotifier.__inotifyGetNotificationShare__ = &__nvoc_thunk_Notifier_inotifyGetNotificationShare;

    pThis->__nvoc_base_INotifier.__inotifySetNotificationShare__ = &__nvoc_thunk_Notifier_inotifySetNotificationShare;

    pThis->__nvoc_base_INotifier.__inotifyUnregisterEvent__ = &__nvoc_thunk_Notifier_inotifyUnregisterEvent;

    pThis->__nvoc_base_INotifier.__inotifyGetOrAllocNotifShare__ = &__nvoc_thunk_Notifier_inotifyGetOrAllocNotifShare;
}

void __nvoc_init_funcTable_Notifier(Notifier *pThis) {
    __nvoc_init_funcTable_Notifier_1(pThis);
}

void __nvoc_init_INotifier(INotifier*);
void __nvoc_init_Notifier(Notifier *pThis) {
    pThis->__nvoc_pbase_Notifier = pThis;
    pThis->__nvoc_pbase_INotifier = &pThis->__nvoc_base_INotifier;
    __nvoc_init_INotifier(&pThis->__nvoc_base_INotifier);
    __nvoc_init_funcTable_Notifier(pThis);
}

@@ -1,529 +0,0 @@
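// Deleted header (presumably g_event_nvoc.h, per its _G_EVENT_NVOC_H_ guard):
// declares EVENTNOTIFICATION plus the NotifShare, Event, INotifier, and
// Notifier classes, with *_DISPATCH inline wrappers that route each virtual
// call through the per-instance function table.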
#ifndef _G_EVENT_NVOC_H_
#define _G_EVENT_NVOC_H_
#include "nvoc/runtime.h"

#ifdef __cplusplus
extern "C" {
#endif

/*
 * SPDX-FileCopyrightText: Copyright (c) 1993-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include "g_event_nvoc.h"

#ifndef _EVENT_H_
#define _EVENT_H_

#include "class/cl0000.h" // NV0000_NOTIFIERS_MAXCOUNT

#include "resserv/resserv.h"
#include "nvoc/prelude.h"
#include "resserv/rs_server.h"
#include "rmapi/resource.h"

typedef struct _def_system_event_queue SYSTEM_EVENTS_QUEUE;

struct EVENTNOTIFICATION
{
    NvHandle            hEventClient;
    NvHandle            hEvent;
    NvU32               subdeviceInst;
    NvU32               NotifyIndex;              // NVnnnn_NOTIFIERS_xyz
    NvU32               NotifyType;               // Event class. NV01_EVENT_OS_EVENT for example.
    NvBool              bUserOsEventHandle;       // Event was allocated from user app.
    NvBool              bBroadcastEvent;          // Wait for all subdevices before sending event.
    NvBool              bClientRM;                // Event was allocated from client RM.
    NvBool              bSubdeviceSpecificEvent;  // SubdeviceSpecificValue is valid.
    NvU32               SubdeviceSpecificValue;   // NV0005_NOTIFY_INDEX_SUBDEVICE
    NvBool              bEventDataRequired;       // nv_post_event allocates memory for Data.
    NvBool              bNonStallIntrEvent;
    NvU32               NotifyTriggerCount;       // Used with bBroadcastEvent.
    NvP64               Data;
    struct EVENTNOTIFICATION *Next;
};
typedef struct EVENTNOTIFICATION EVENTNOTIFICATION, *PEVENTNOTIFICATION;

struct INotifier;

#ifndef __NVOC_CLASS_INotifier_TYPEDEF__
#define __NVOC_CLASS_INotifier_TYPEDEF__
typedef struct INotifier INotifier;
#endif /* __NVOC_CLASS_INotifier_TYPEDEF__ */

#ifndef __nvoc_class_id_INotifier
#define __nvoc_class_id_INotifier 0xf8f965
#endif /* __nvoc_class_id_INotifier */


#define NV_SYSTEM_EVENT_QUEUE_SIZE 16
struct _def_system_event_queue
{
    NvU32 Head;
    NvU32 Tail;
    struct event_queue
    {
        NvU32 event;
        NvU32 status;
    } EventQueue[NV_SYSTEM_EVENT_QUEUE_SIZE];
};

struct _def_client_system_event_info
{
    SYSTEM_EVENTS_QUEUE systemEventsQueue;
    NvU32 notifyActions[NV0000_NOTIFIERS_MAXCOUNT];
};

/**
 * This class represents data that is shared between one notifier and any
 * events that are registered with the notifier.
 *
 * Instances of this class are ref-counted and will be kept alive until
 * the notifier and all of its events have been freed.
 */
#ifdef NVOC_EVENT_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
struct NotifShare {
    const struct NVOC_RTTI *__nvoc_rtti;
    struct RsShared __nvoc_base_RsShared;
    struct Object *__nvoc_pbase_Object;
    struct RsShared *__nvoc_pbase_RsShared;
    struct NotifShare *__nvoc_pbase_NotifShare;
    struct INotifier *pNotifier;
    NvHandle hNotifierClient;
    NvHandle hNotifierResource;
    EVENTNOTIFICATION *pEventList;
};

#ifndef __NVOC_CLASS_NotifShare_TYPEDEF__
#define __NVOC_CLASS_NotifShare_TYPEDEF__
typedef struct NotifShare NotifShare;
#endif /* __NVOC_CLASS_NotifShare_TYPEDEF__ */

#ifndef __nvoc_class_id_NotifShare
#define __nvoc_class_id_NotifShare 0xd5f150
#endif /* __nvoc_class_id_NotifShare */

extern const struct NVOC_CLASS_DEF __nvoc_class_def_NotifShare;

#define __staticCast_NotifShare(pThis) \
    ((pThis)->__nvoc_pbase_NotifShare)

#ifdef __nvoc_event_h_disabled
#define __dynamicCast_NotifShare(pThis) ((NotifShare*)NULL)
#else //__nvoc_event_h_disabled
#define __dynamicCast_NotifShare(pThis) \
    ((NotifShare*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(NotifShare)))
#endif //__nvoc_event_h_disabled


NV_STATUS __nvoc_objCreateDynamic_NotifShare(NotifShare**, Dynamic*, NvU32, va_list);

NV_STATUS __nvoc_objCreate_NotifShare(NotifShare**, Dynamic*, NvU32);
#define __objCreate_NotifShare(ppNewObj, pParent, createFlags) \
    __nvoc_objCreate_NotifShare((ppNewObj), staticCast((pParent), Dynamic), (createFlags))

NV_STATUS shrnotifConstruct_IMPL(struct NotifShare *arg_pNotifShare);
#define __nvoc_shrnotifConstruct(arg_pNotifShare) shrnotifConstruct_IMPL(arg_pNotifShare)
void shrnotifDestruct_IMPL(struct NotifShare *pNotifShare);
#define __nvoc_shrnotifDestruct(pNotifShare) shrnotifDestruct_IMPL(pNotifShare)
#undef PRIVATE_FIELD


/**
 * This class represents event notification consumers
 */
#ifdef NVOC_EVENT_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
struct Event {
    const struct NVOC_RTTI *__nvoc_rtti;
    struct RmResource __nvoc_base_RmResource;
    struct Object *__nvoc_pbase_Object;
    struct RsResource *__nvoc_pbase_RsResource;
    struct RmResourceCommon *__nvoc_pbase_RmResourceCommon;
    struct RmResource *__nvoc_pbase_RmResource;
    struct Event *__nvoc_pbase_Event;
    NvBool (*__eventShareCallback__)(struct Event *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *);
    NV_STATUS (*__eventCheckMemInterUnmap__)(struct Event *, NvBool);
    NV_STATUS (*__eventControl__)(struct Event *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
    NV_STATUS (*__eventGetMemInterMapParams__)(struct Event *, RMRES_MEM_INTER_MAP_PARAMS *);
    NV_STATUS (*__eventGetMemoryMappingDescriptor__)(struct Event *, struct MEMORY_DESCRIPTOR **);
    NvU32 (*__eventGetRefCount__)(struct Event *);
    NV_STATUS (*__eventControlFilter__)(struct Event *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
    void (*__eventAddAdditionalDependants__)(struct RsClient *, struct Event *, RsResourceRef *);
    NV_STATUS (*__eventUnmap__)(struct Event *, struct CALL_CONTEXT *, RsCpuMapping *);
    NV_STATUS (*__eventControl_Prologue__)(struct Event *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
    NvBool (*__eventCanCopy__)(struct Event *);
    NV_STATUS (*__eventMapTo__)(struct Event *, RS_RES_MAP_TO_PARAMS *);
    void (*__eventPreDestruct__)(struct Event *);
    NV_STATUS (*__eventUnmapFrom__)(struct Event *, RS_RES_UNMAP_FROM_PARAMS *);
    void (*__eventControl_Epilogue__)(struct Event *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
    NV_STATUS (*__eventControlLookup__)(struct Event *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **);
    NV_STATUS (*__eventMap__)(struct Event *, struct CALL_CONTEXT *, RS_CPU_MAP_PARAMS *, RsCpuMapping *);
    NvBool (*__eventAccessCallback__)(struct Event *, struct RsClient *, void *, RsAccessRight);
    struct NotifShare *pNotifierShare;
    NvHandle hNotifierClient;
    NvHandle hNotifierResource;
    NvHandle hEvent;
};

#ifndef __NVOC_CLASS_Event_TYPEDEF__
#define __NVOC_CLASS_Event_TYPEDEF__
typedef struct Event Event;
#endif /* __NVOC_CLASS_Event_TYPEDEF__ */

#ifndef __nvoc_class_id_Event
#define __nvoc_class_id_Event 0xa4ecfc
#endif /* __nvoc_class_id_Event */

extern const struct NVOC_CLASS_DEF __nvoc_class_def_Event;

#define __staticCast_Event(pThis) \
    ((pThis)->__nvoc_pbase_Event)

#ifdef __nvoc_event_h_disabled
#define __dynamicCast_Event(pThis) ((Event*)NULL)
#else //__nvoc_event_h_disabled
#define __dynamicCast_Event(pThis) \
    ((Event*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(Event)))
#endif //__nvoc_event_h_disabled


NV_STATUS __nvoc_objCreateDynamic_Event(Event**, Dynamic*, NvU32, va_list);

NV_STATUS __nvoc_objCreate_Event(Event**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams);
#define __objCreate_Event(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \
    __nvoc_objCreate_Event((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams)

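// Each public eventXxx() macro below resolves to a *_DISPATCH inline that
// indirects through the function pointer installed by the generated
// __nvoc_init_funcTable_Event_1(), so calls on an Event reach either its own
// methods or the base-class thunks defined in the companion .c file.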
#define eventShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) eventShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy)
|
||||
#define eventCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) eventCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided)
|
||||
#define eventControl(pResource, pCallContext, pParams) eventControl_DISPATCH(pResource, pCallContext, pParams)
|
||||
#define eventGetMemInterMapParams(pRmResource, pParams) eventGetMemInterMapParams_DISPATCH(pRmResource, pParams)
|
||||
#define eventGetMemoryMappingDescriptor(pRmResource, ppMemDesc) eventGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc)
|
||||
#define eventGetRefCount(pResource) eventGetRefCount_DISPATCH(pResource)
|
||||
#define eventControlFilter(pResource, pCallContext, pParams) eventControlFilter_DISPATCH(pResource, pCallContext, pParams)
|
||||
#define eventAddAdditionalDependants(pClient, pResource, pReference) eventAddAdditionalDependants_DISPATCH(pClient, pResource, pReference)
|
||||
#define eventUnmap(pResource, pCallContext, pCpuMapping) eventUnmap_DISPATCH(pResource, pCallContext, pCpuMapping)
|
||||
#define eventControl_Prologue(pResource, pCallContext, pParams) eventControl_Prologue_DISPATCH(pResource, pCallContext, pParams)
|
||||
#define eventCanCopy(pResource) eventCanCopy_DISPATCH(pResource)
|
||||
#define eventMapTo(pResource, pParams) eventMapTo_DISPATCH(pResource, pParams)
|
||||
#define eventPreDestruct(pResource) eventPreDestruct_DISPATCH(pResource)
|
||||
#define eventUnmapFrom(pResource, pParams) eventUnmapFrom_DISPATCH(pResource, pParams)
|
||||
#define eventControl_Epilogue(pResource, pCallContext, pParams) eventControl_Epilogue_DISPATCH(pResource, pCallContext, pParams)
|
||||
#define eventControlLookup(pResource, pParams, ppEntry) eventControlLookup_DISPATCH(pResource, pParams, ppEntry)
|
||||
#define eventMap(pResource, pCallContext, pParams, pCpuMapping) eventMap_DISPATCH(pResource, pCallContext, pParams, pCpuMapping)
|
||||
#define eventAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) eventAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight)
|
||||
static inline NvBool eventShareCallback_DISPATCH(struct Event *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
|
||||
return pResource->__eventShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy);
|
||||
}
|
||||
|
||||
static inline NV_STATUS eventCheckMemInterUnmap_DISPATCH(struct Event *pRmResource, NvBool bSubdeviceHandleProvided) {
|
||||
return pRmResource->__eventCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided);
|
||||
}
|
||||
|
||||
static inline NV_STATUS eventControl_DISPATCH(struct Event *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
|
||||
return pResource->__eventControl__(pResource, pCallContext, pParams);
|
||||
}
|
||||
|
||||
static inline NV_STATUS eventGetMemInterMapParams_DISPATCH(struct Event *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
|
||||
return pRmResource->__eventGetMemInterMapParams__(pRmResource, pParams);
|
||||
}
|
||||
|
||||
static inline NV_STATUS eventGetMemoryMappingDescriptor_DISPATCH(struct Event *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
|
||||
return pRmResource->__eventGetMemoryMappingDescriptor__(pRmResource, ppMemDesc);
|
||||
}
|
||||
|
||||
static inline NvU32 eventGetRefCount_DISPATCH(struct Event *pResource) {
|
||||
return pResource->__eventGetRefCount__(pResource);
|
||||
}
|
||||
|
||||
static inline NV_STATUS eventControlFilter_DISPATCH(struct Event *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
|
||||
return pResource->__eventControlFilter__(pResource, pCallContext, pParams);
|
||||
}
|
||||
|
||||
static inline void eventAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct Event *pResource, RsResourceRef *pReference) {
|
||||
pResource->__eventAddAdditionalDependants__(pClient, pResource, pReference);
|
||||
}
|
||||
|
||||
static inline NV_STATUS eventUnmap_DISPATCH(struct Event *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) {
|
||||
return pResource->__eventUnmap__(pResource, pCallContext, pCpuMapping);
|
||||
}
|
||||
|
||||
static inline NV_STATUS eventControl_Prologue_DISPATCH(struct Event *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
|
||||
return pResource->__eventControl_Prologue__(pResource, pCallContext, pParams);
|
||||
}
|
||||
|
||||
static inline NvBool eventCanCopy_DISPATCH(struct Event *pResource) {
|
||||
return pResource->__eventCanCopy__(pResource);
|
||||
}
|
||||
|
||||
static inline NV_STATUS eventMapTo_DISPATCH(struct Event *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
|
||||
return pResource->__eventMapTo__(pResource, pParams);
|
||||
}
|
||||
|
||||
static inline void eventPreDestruct_DISPATCH(struct Event *pResource) {
|
||||
pResource->__eventPreDestruct__(pResource);
|
||||
}
|
||||
|
||||
static inline NV_STATUS eventUnmapFrom_DISPATCH(struct Event *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
|
||||
return pResource->__eventUnmapFrom__(pResource, pParams);
|
||||
}
|
||||
|
||||
static inline void eventControl_Epilogue_DISPATCH(struct Event *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
|
||||
pResource->__eventControl_Epilogue__(pResource, pCallContext, pParams);
|
||||
}
|
||||
|
||||
static inline NV_STATUS eventControlLookup_DISPATCH(struct Event *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) {
|
||||
return pResource->__eventControlLookup__(pResource, pParams, ppEntry);
|
||||
}
|
||||
|
||||
static inline NV_STATUS eventMap_DISPATCH(struct Event *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) {
|
||||
return pResource->__eventMap__(pResource, pCallContext, pParams, pCpuMapping);
|
||||
}
|
||||
|
||||
static inline NvBool eventAccessCallback_DISPATCH(struct Event *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
|
||||
return pResource->__eventAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight);
|
||||
}
|
||||
|
||||
NV_STATUS eventConstruct_IMPL(struct Event *arg_pEvent, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams);
|
||||
#define __nvoc_eventConstruct(arg_pEvent, arg_pCallContext, arg_pParams) eventConstruct_IMPL(arg_pEvent, arg_pCallContext, arg_pParams)
|
||||
void eventDestruct_IMPL(struct Event *pEvent);
|
||||
#define __nvoc_eventDestruct(pEvent) eventDestruct_IMPL(pEvent)
|
||||
NV_STATUS eventInit_IMPL(struct Event *pEvent, struct CALL_CONTEXT *pCallContext, NvHandle hNotifierClient, NvHandle hNotifierResource, PEVENTNOTIFICATION **pppEventNotification);
|
||||
#ifdef __nvoc_event_h_disabled
|
||||
static inline NV_STATUS eventInit(struct Event *pEvent, struct CALL_CONTEXT *pCallContext, NvHandle hNotifierClient, NvHandle hNotifierResource, PEVENTNOTIFICATION **pppEventNotification) {
|
||||
NV_ASSERT_FAILED_PRECOMP("Event was disabled!");
|
||||
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_event_h_disabled
#define eventInit(pEvent, pCallContext, hNotifierClient, hNotifierResource, pppEventNotification) eventInit_IMPL(pEvent, pCallContext, hNotifierClient, hNotifierResource, pppEventNotification)
#endif //__nvoc_event_h_disabled

#undef PRIVATE_FIELD


/**
 * Mix-in interface for resources that send notifications to events
 */
#ifdef NVOC_EVENT_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
struct INotifier {
    const struct NVOC_RTTI *__nvoc_rtti;
    struct INotifier *__nvoc_pbase_INotifier;
    PEVENTNOTIFICATION *(*__inotifyGetNotificationListPtr__)(struct INotifier *);
    void (*__inotifySetNotificationShare__)(struct INotifier *, struct NotifShare *);
    struct NotifShare *(*__inotifyGetNotificationShare__)(struct INotifier *);
    NV_STATUS (*__inotifyUnregisterEvent__)(struct INotifier *, NvHandle, NvHandle, NvHandle, NvHandle);
    NV_STATUS (*__inotifyGetOrAllocNotifShare__)(struct INotifier *, NvHandle, NvHandle, struct NotifShare **);
};

#ifndef __NVOC_CLASS_INotifier_TYPEDEF__
#define __NVOC_CLASS_INotifier_TYPEDEF__
typedef struct INotifier INotifier;
#endif /* __NVOC_CLASS_INotifier_TYPEDEF__ */

#ifndef __nvoc_class_id_INotifier
#define __nvoc_class_id_INotifier 0xf8f965
#endif /* __nvoc_class_id_INotifier */

extern const struct NVOC_CLASS_DEF __nvoc_class_def_INotifier;

#define __staticCast_INotifier(pThis) \
    ((pThis)->__nvoc_pbase_INotifier)

#ifdef __nvoc_event_h_disabled
#define __dynamicCast_INotifier(pThis) ((INotifier*)NULL)
#else //__nvoc_event_h_disabled
#define __dynamicCast_INotifier(pThis) \
    ((INotifier*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(INotifier)))
#endif //__nvoc_event_h_disabled


NV_STATUS __nvoc_objCreateDynamic_INotifier(INotifier**, Dynamic*, NvU32, va_list);

NV_STATUS __nvoc_objCreate_INotifier(INotifier**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext);
#define __objCreate_INotifier(ppNewObj, pParent, createFlags, arg_pCallContext) \
    __nvoc_objCreate_INotifier((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext)

#define inotifyGetNotificationListPtr(pNotifier) inotifyGetNotificationListPtr_DISPATCH(pNotifier)
#define inotifySetNotificationShare(pNotifier, pNotifShare) inotifySetNotificationShare_DISPATCH(pNotifier, pNotifShare)
#define inotifyGetNotificationShare(pNotifier) inotifyGetNotificationShare_DISPATCH(pNotifier)
#define inotifyUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) inotifyUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent)
#define inotifyGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) inotifyGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare)
static inline PEVENTNOTIFICATION *inotifyGetNotificationListPtr_DISPATCH(struct INotifier *pNotifier) {
    return pNotifier->__inotifyGetNotificationListPtr__(pNotifier);
}

static inline void inotifySetNotificationShare_DISPATCH(struct INotifier *pNotifier, struct NotifShare *pNotifShare) {
    pNotifier->__inotifySetNotificationShare__(pNotifier, pNotifShare);
}

static inline struct NotifShare *inotifyGetNotificationShare_DISPATCH(struct INotifier *pNotifier) {
    return pNotifier->__inotifyGetNotificationShare__(pNotifier);
}

static inline NV_STATUS inotifyUnregisterEvent_DISPATCH(struct INotifier *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) {
    return pNotifier->__inotifyUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent);
}

static inline NV_STATUS inotifyGetOrAllocNotifShare_DISPATCH(struct INotifier *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) {
    return pNotifier->__inotifyGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare);
}

NV_STATUS inotifyConstruct_IMPL(struct INotifier *arg_pNotifier, struct CALL_CONTEXT *arg_pCallContext);
#define __nvoc_inotifyConstruct(arg_pNotifier, arg_pCallContext) inotifyConstruct_IMPL(arg_pNotifier, arg_pCallContext)
void inotifyDestruct_IMPL(struct INotifier *pNotifier);
#define __nvoc_inotifyDestruct(pNotifier) inotifyDestruct_IMPL(pNotifier)
PEVENTNOTIFICATION inotifyGetNotificationList_IMPL(struct INotifier *pNotifier);
#ifdef __nvoc_event_h_disabled
static inline PEVENTNOTIFICATION inotifyGetNotificationList(struct INotifier *pNotifier) {
    NV_ASSERT_FAILED_PRECOMP("INotifier was disabled!");
    return NULL;
}
#else //__nvoc_event_h_disabled
#define inotifyGetNotificationList(pNotifier) inotifyGetNotificationList_IMPL(pNotifier)
#endif //__nvoc_event_h_disabled

#undef PRIVATE_FIELD
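
/*
 * Usage sketch (illustrative only, not part of the generated header): each
 * inotify* macro above expands to a *_DISPATCH inline that forwards through
 * the per-object function pointer installed by an implementing class such as
 * Notifier; this is how NVOC gets virtual-call semantics in C. The handle
 * values below are placeholders, not identifiers from this header. Guarded
 * with #if 0 so it is never compiled.
 */
#if 0
static NV_STATUS exampleNotifShareLookup(struct INotifier *pNotifier,
                                         NvHandle hClient, NvHandle hResource)
{
    struct NotifShare *pShare = inotifyGetNotificationShare(pNotifier);
    if (pShare != NULL)
        return NV_OK;

    // Allocate (or look up) the share on first use; the virtual call lands
    // in the implementing class, e.g. notifyGetOrAllocNotifShare_IMPL.
    return inotifyGetOrAllocNotifShare(pNotifier, hClient, hResource, &pShare);
}
#endif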

/**
 * Basic implementation for event notification mix-in
 */
#ifdef NVOC_EVENT_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
struct Notifier {
    const struct NVOC_RTTI *__nvoc_rtti;
    struct INotifier __nvoc_base_INotifier;
    struct INotifier *__nvoc_pbase_INotifier;
    struct Notifier *__nvoc_pbase_Notifier;
    PEVENTNOTIFICATION *(*__notifyGetNotificationListPtr__)(struct Notifier *);
    struct NotifShare *(*__notifyGetNotificationShare__)(struct Notifier *);
    void (*__notifySetNotificationShare__)(struct Notifier *, struct NotifShare *);
    NV_STATUS (*__notifyUnregisterEvent__)(struct Notifier *, NvHandle, NvHandle, NvHandle, NvHandle);
    NV_STATUS (*__notifyGetOrAllocNotifShare__)(struct Notifier *, NvHandle, NvHandle, struct NotifShare **);
    struct NotifShare *pNotifierShare;
};

#ifndef __NVOC_CLASS_Notifier_TYPEDEF__
#define __NVOC_CLASS_Notifier_TYPEDEF__
typedef struct Notifier Notifier;
#endif /* __NVOC_CLASS_Notifier_TYPEDEF__ */

#ifndef __nvoc_class_id_Notifier
#define __nvoc_class_id_Notifier 0xa8683b
#endif /* __nvoc_class_id_Notifier */

extern const struct NVOC_CLASS_DEF __nvoc_class_def_Notifier;

#define __staticCast_Notifier(pThis) \
    ((pThis)->__nvoc_pbase_Notifier)

#ifdef __nvoc_event_h_disabled
#define __dynamicCast_Notifier(pThis) ((Notifier*)NULL)
#else //__nvoc_event_h_disabled
#define __dynamicCast_Notifier(pThis) \
    ((Notifier*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(Notifier)))
#endif //__nvoc_event_h_disabled


NV_STATUS __nvoc_objCreateDynamic_Notifier(Notifier**, Dynamic*, NvU32, va_list);

NV_STATUS __nvoc_objCreate_Notifier(Notifier**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext);
#define __objCreate_Notifier(ppNewObj, pParent, createFlags, arg_pCallContext) \
    __nvoc_objCreate_Notifier((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext)

#define notifyGetNotificationListPtr(pNotifier) notifyGetNotificationListPtr_DISPATCH(pNotifier)
#define notifyGetNotificationShare(pNotifier) notifyGetNotificationShare_DISPATCH(pNotifier)
#define notifySetNotificationShare(pNotifier, pNotifShare) notifySetNotificationShare_DISPATCH(pNotifier, pNotifShare)
#define notifyUnregisterEvent(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent) notifyUnregisterEvent_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent)
#define notifyGetOrAllocNotifShare(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare) notifyGetOrAllocNotifShare_DISPATCH(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare)
PEVENTNOTIFICATION *notifyGetNotificationListPtr_IMPL(struct Notifier *pNotifier);

static inline PEVENTNOTIFICATION *notifyGetNotificationListPtr_DISPATCH(struct Notifier *pNotifier) {
    return pNotifier->__notifyGetNotificationListPtr__(pNotifier);
}

struct NotifShare *notifyGetNotificationShare_IMPL(struct Notifier *pNotifier);

static inline struct NotifShare *notifyGetNotificationShare_DISPATCH(struct Notifier *pNotifier) {
    return pNotifier->__notifyGetNotificationShare__(pNotifier);
}

void notifySetNotificationShare_IMPL(struct Notifier *pNotifier, struct NotifShare *pNotifShare);

static inline void notifySetNotificationShare_DISPATCH(struct Notifier *pNotifier, struct NotifShare *pNotifShare) {
    pNotifier->__notifySetNotificationShare__(pNotifier, pNotifShare);
}

NV_STATUS notifyUnregisterEvent_IMPL(struct Notifier *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent);

static inline NV_STATUS notifyUnregisterEvent_DISPATCH(struct Notifier *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, NvHandle hEventClient, NvHandle hEvent) {
    return pNotifier->__notifyUnregisterEvent__(pNotifier, hNotifierClient, hNotifierResource, hEventClient, hEvent);
}

NV_STATUS notifyGetOrAllocNotifShare_IMPL(struct Notifier *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare);

static inline NV_STATUS notifyGetOrAllocNotifShare_DISPATCH(struct Notifier *pNotifier, NvHandle hNotifierClient, NvHandle hNotifierResource, struct NotifShare **ppNotifShare) {
    return pNotifier->__notifyGetOrAllocNotifShare__(pNotifier, hNotifierClient, hNotifierResource, ppNotifShare);
}

NV_STATUS notifyConstruct_IMPL(struct Notifier *arg_pNotifier, struct CALL_CONTEXT *arg_pCallContext);
#define __nvoc_notifyConstruct(arg_pNotifier, arg_pCallContext) notifyConstruct_IMPL(arg_pNotifier, arg_pCallContext)
void notifyDestruct_IMPL(struct Notifier *pNotifier);
#define __nvoc_notifyDestruct(pNotifier) notifyDestruct_IMPL(pNotifier)
#undef PRIVATE_FIELD


void CliAddSystemEvent(NvU32, NvU32);
NvBool CliDelObjectEvents(NvHandle hClient, NvHandle hObject);
NvBool CliGetEventInfo(NvHandle hClient, NvHandle hEvent, struct Event **ppEvent);
NV_STATUS CliGetEventNotificationList(NvHandle hClient, NvHandle hObject,
                                      struct INotifier **ppNotifier,
                                      PEVENTNOTIFICATION **pppEventNotification);

NV_STATUS registerEventNotification(PEVENTNOTIFICATION*, NvHandle, NvHandle, NvHandle, NvU32, NvU32, NvP64, NvBool);
NV_STATUS unregisterEventNotification(PEVENTNOTIFICATION*, NvHandle, NvHandle, NvHandle);
NV_STATUS unregisterEventNotificationWithData(PEVENTNOTIFICATION *, NvHandle, NvHandle, NvHandle, NvBool, NvP64);
NV_STATUS bindEventNotificationToSubdevice(PEVENTNOTIFICATION, NvHandle, NvU32);
NV_STATUS engineNonStallIntrNotify(OBJGPU *, NvU32);
NV_STATUS notifyEvents(OBJGPU*, EVENTNOTIFICATION*, NvU32, NvU32, NvU32, NV_STATUS, NvU32);
NV_STATUS engineNonStallIntrNotifyEvent(OBJGPU *, NvU32, NvHandle);
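
/*
 * Usage sketch (illustrative only, not from the original header): the
 * expected pairing of the registration helpers declared above. The argument
 * names and their ordering beyond what the prototypes state are assumptions;
 * the NV_FALSE stands in for the trailing NvBool flag whose meaning depends
 * on the event type. Guarded with #if 0 so it is never compiled.
 */
#if 0
static void exampleEventRegistration(struct INotifier *pNotifier,
                                     NvHandle hEventClient, NvHandle hNotifier,
                                     NvHandle hEvent, NvU32 notifyIndex,
                                     NvU32 eventType, NvP64 data)
{
    PEVENTNOTIFICATION *ppList = inotifyGetNotificationListPtr(pNotifier);

    // Register against the notifier's per-object notification list ...
    if (registerEventNotification(ppList, hEventClient, hNotifier, hEvent,
                                  notifyIndex, eventType, data, NV_FALSE) == NV_OK)
    {
        // ... and later tear it down with the matching unregister call.
        unregisterEventNotification(ppList, hEventClient, hNotifier, hEvent);
    }
}
#endif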

#endif // _EVENT_H_

#ifdef __cplusplus
} // extern "C"
#endif
#endif // _G_EVENT_NVOC_H_
@@ -1,334 +0,0 @@
#define NVOC_GENERIC_ENGINE_H_PRIVATE_ACCESS_ALLOWED
#include "nvoc/runtime.h"
#include "nvoc/rtti.h"
#include "nvtypes.h"
#include "nvport/nvport.h"
#include "nvport/inline/util_valist.h"
#include "utils/nvassert.h"
#include "g_generic_engine_nvoc.h"

#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0x4bc329 = 1;
#endif

extern const struct NVOC_CLASS_DEF __nvoc_class_def_GenericEngineApi;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource;

void __nvoc_init_GenericEngineApi(GenericEngineApi*);
void __nvoc_init_funcTable_GenericEngineApi(GenericEngineApi*);
NV_STATUS __nvoc_ctor_GenericEngineApi(GenericEngineApi*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams);
void __nvoc_init_dataField_GenericEngineApi(GenericEngineApi*);
void __nvoc_dtor_GenericEngineApi(GenericEngineApi*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_GenericEngineApi;

static const struct NVOC_RTTI __nvoc_rtti_GenericEngineApi_GenericEngineApi = {
    /*pClassDef=*/  &__nvoc_class_def_GenericEngineApi,
    /*dtor=*/       (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_GenericEngineApi,
    /*offset=*/     0,
};

static const struct NVOC_RTTI __nvoc_rtti_GenericEngineApi_Object = {
    /*pClassDef=*/  &__nvoc_class_def_Object,
    /*dtor=*/       &__nvoc_destructFromBase,
    /*offset=*/     NV_OFFSETOF(GenericEngineApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object),
};

static const struct NVOC_RTTI __nvoc_rtti_GenericEngineApi_RsResource = {
    /*pClassDef=*/  &__nvoc_class_def_RsResource,
    /*dtor=*/       &__nvoc_destructFromBase,
    /*offset=*/     NV_OFFSETOF(GenericEngineApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource),
};

static const struct NVOC_RTTI __nvoc_rtti_GenericEngineApi_RmResourceCommon = {
    /*pClassDef=*/  &__nvoc_class_def_RmResourceCommon,
    /*dtor=*/       &__nvoc_destructFromBase,
    /*offset=*/     NV_OFFSETOF(GenericEngineApi, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon),
};

static const struct NVOC_RTTI __nvoc_rtti_GenericEngineApi_RmResource = {
    /*pClassDef=*/  &__nvoc_class_def_RmResource,
    /*dtor=*/       &__nvoc_destructFromBase,
    /*offset=*/     NV_OFFSETOF(GenericEngineApi, __nvoc_base_GpuResource.__nvoc_base_RmResource),
};

static const struct NVOC_RTTI __nvoc_rtti_GenericEngineApi_GpuResource = {
    /*pClassDef=*/  &__nvoc_class_def_GpuResource,
    /*dtor=*/       &__nvoc_destructFromBase,
    /*offset=*/     NV_OFFSETOF(GenericEngineApi, __nvoc_base_GpuResource),
};

static const struct NVOC_CASTINFO __nvoc_castinfo_GenericEngineApi = {
    /*numRelatives=*/ 6,
    /*relatives=*/ {
        &__nvoc_rtti_GenericEngineApi_GenericEngineApi,
        &__nvoc_rtti_GenericEngineApi_GpuResource,
        &__nvoc_rtti_GenericEngineApi_RmResource,
        &__nvoc_rtti_GenericEngineApi_RmResourceCommon,
        &__nvoc_rtti_GenericEngineApi_RsResource,
        &__nvoc_rtti_GenericEngineApi_Object,
    },
};

const struct NVOC_CLASS_DEF __nvoc_class_def_GenericEngineApi =
{
    /*classInfo=*/ {
        /*size=*/               sizeof(GenericEngineApi),
        /*classId=*/            classId(GenericEngineApi),
        /*providerId=*/         &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
        /*name=*/               "GenericEngineApi",
#endif
    },
    /*objCreatefn=*/        (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_GenericEngineApi,
    /*pCastInfo=*/          &__nvoc_castinfo_GenericEngineApi,
    /*pExportInfo=*/        &__nvoc_export_info_GenericEngineApi
};

static NV_STATUS __nvoc_thunk_GenericEngineApi_gpuresMap(struct GpuResource *pGenericEngineApi, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) {
    return genapiMap((struct GenericEngineApi *)(((unsigned char *)pGenericEngineApi) - __nvoc_rtti_GenericEngineApi_GpuResource.offset), pCallContext, pParams, pCpuMapping);
}

static NV_STATUS __nvoc_thunk_GenericEngineApi_gpuresGetMapAddrSpace(struct GpuResource *pGenericEngineApi, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) {
    return genapiGetMapAddrSpace((struct GenericEngineApi *)(((unsigned char *)pGenericEngineApi) - __nvoc_rtti_GenericEngineApi_GpuResource.offset), pCallContext, mapFlags, pAddrSpace);
}

static NV_STATUS __nvoc_thunk_GenericEngineApi_gpuresControl(struct GpuResource *pGenericEngineApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return genapiControl((struct GenericEngineApi *)(((unsigned char *)pGenericEngineApi) - __nvoc_rtti_GenericEngineApi_GpuResource.offset), pCallContext, pParams);
}

static NvBool __nvoc_thunk_GpuResource_genapiShareCallback(struct GenericEngineApi *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
    return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_GenericEngineApi_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy);
}

static NV_STATUS __nvoc_thunk_GpuResource_genapiUnmap(struct GenericEngineApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) {
    return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_GenericEngineApi_GpuResource.offset), pCallContext, pCpuMapping);
}

static NV_STATUS __nvoc_thunk_RmResource_genapiGetMemInterMapParams(struct GenericEngineApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
    return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_GenericEngineApi_RmResource.offset), pParams);
}

static NV_STATUS __nvoc_thunk_RmResource_genapiGetMemoryMappingDescriptor(struct GenericEngineApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
    return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_GenericEngineApi_RmResource.offset), ppMemDesc);
}

static NvHandle __nvoc_thunk_GpuResource_genapiGetInternalObjectHandle(struct GenericEngineApi *pGpuResource) {
    return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_GenericEngineApi_GpuResource.offset));
}

static NV_STATUS __nvoc_thunk_RsResource_genapiControlFilter(struct GenericEngineApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GenericEngineApi_RsResource.offset), pCallContext, pParams);
}

static void __nvoc_thunk_RsResource_genapiAddAdditionalDependants(struct RsClient *pClient, struct GenericEngineApi *pResource, RsResourceRef *pReference) {
    resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GenericEngineApi_RsResource.offset), pReference);
}

static NvU32 __nvoc_thunk_RsResource_genapiGetRefCount(struct GenericEngineApi *pResource) {
    return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GenericEngineApi_RsResource.offset));
}

static NV_STATUS __nvoc_thunk_RmResource_genapiCheckMemInterUnmap(struct GenericEngineApi *pRmResource, NvBool bSubdeviceHandleProvided) {
    return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_GenericEngineApi_RmResource.offset), bSubdeviceHandleProvided);
}

static NV_STATUS __nvoc_thunk_RsResource_genapiMapTo(struct GenericEngineApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
    return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GenericEngineApi_RsResource.offset), pParams);
}

static NV_STATUS __nvoc_thunk_RmResource_genapiControl_Prologue(struct GenericEngineApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_GenericEngineApi_RmResource.offset), pCallContext, pParams);
}

static NV_STATUS __nvoc_thunk_GpuResource_genapiGetRegBaseOffsetAndSize(struct GenericEngineApi *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) {
    return gpuresGetRegBaseOffsetAndSize((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_GenericEngineApi_GpuResource.offset), pGpu, pOffset, pSize);
}

static NvBool __nvoc_thunk_RsResource_genapiCanCopy(struct GenericEngineApi *pResource) {
    return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GenericEngineApi_RsResource.offset));
}

static NV_STATUS __nvoc_thunk_GpuResource_genapiInternalControlForward(struct GenericEngineApi *pGpuResource, NvU32 command, void *pParams, NvU32 size) {
    return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_GenericEngineApi_GpuResource.offset), command, pParams, size);
}

static void __nvoc_thunk_RsResource_genapiPreDestruct(struct GenericEngineApi *pResource) {
    resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GenericEngineApi_RsResource.offset));
}

static NV_STATUS __nvoc_thunk_RsResource_genapiUnmapFrom(struct GenericEngineApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
    return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GenericEngineApi_RsResource.offset), pParams);
}

static void __nvoc_thunk_RmResource_genapiControl_Epilogue(struct GenericEngineApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_GenericEngineApi_RmResource.offset), pCallContext, pParams);
}

static NV_STATUS __nvoc_thunk_RsResource_genapiControlLookup(struct GenericEngineApi *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) {
    return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GenericEngineApi_RsResource.offset), pParams, ppEntry);
}

static NvBool __nvoc_thunk_RmResource_genapiAccessCallback(struct GenericEngineApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
    return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_GenericEngineApi_RmResource.offset), pInvokingClient, pAllocParams, accessRight);
}
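
/*
 * Note on the thunks above (explanatory, not generated code): each thunk
 * converts between a base-class pointer and the enclosing GenericEngineApi
 * pointer by adding or subtracting the compile-time offset recorded in the
 * class RTTI, then forwards to the target method. Down-casts (base ->
 * derived) subtract the offset; up-casts (derived -> base) add it. A
 * schematic of the pattern, compiled out with #if 0:
 */
#if 0
static NV_STATUS __nvoc_thunk_schematic(struct GpuResource *pBase)
{
    // Recover the enclosing GenericEngineApi from its embedded GpuResource
    // base using the RTTI offset (the same arithmetic as the thunks above).
    struct GenericEngineApi *pDerived = (struct GenericEngineApi *)
        (((unsigned char *)pBase) - __nvoc_rtti_GenericEngineApi_GpuResource.offset);
    return genapiControl(pDerived, NULL, NULL); // NULLs are placeholders
}
#endif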

const struct NVOC_EXPORT_INFO __nvoc_export_info_GenericEngineApi =
{
    /*numEntries=*/     0,
    /*pExportEntries=*/ 0
};

void __nvoc_dtor_GpuResource(GpuResource*);
void __nvoc_dtor_GenericEngineApi(GenericEngineApi *pThis) {
    __nvoc_genapiDestruct(pThis);
    __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource);
    PORT_UNREFERENCED_VARIABLE(pThis);
}

void __nvoc_init_dataField_GenericEngineApi(GenericEngineApi *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);
}

NV_STATUS __nvoc_ctor_GpuResource(GpuResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
NV_STATUS __nvoc_ctor_GenericEngineApi(GenericEngineApi *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
    NV_STATUS status = NV_OK;
    status = __nvoc_ctor_GpuResource(&pThis->__nvoc_base_GpuResource, arg_pCallContext, arg_pParams);
    if (status != NV_OK) goto __nvoc_ctor_GenericEngineApi_fail_GpuResource;
    __nvoc_init_dataField_GenericEngineApi(pThis);

    status = __nvoc_genapiConstruct(pThis, arg_pCallContext, arg_pParams);
    if (status != NV_OK) goto __nvoc_ctor_GenericEngineApi_fail__init;
    goto __nvoc_ctor_GenericEngineApi_exit; // Success

__nvoc_ctor_GenericEngineApi_fail__init:
    __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource);
__nvoc_ctor_GenericEngineApi_fail_GpuResource:
__nvoc_ctor_GenericEngineApi_exit:

    return status;
}

static void __nvoc_init_funcTable_GenericEngineApi_1(GenericEngineApi *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);

    pThis->__genapiMap__ = &genapiMap_IMPL;

    pThis->__genapiGetMapAddrSpace__ = &genapiGetMapAddrSpace_IMPL;

    pThis->__genapiControl__ = &genapiControl_IMPL;

    pThis->__nvoc_base_GpuResource.__gpuresMap__ = &__nvoc_thunk_GenericEngineApi_gpuresMap;

    pThis->__nvoc_base_GpuResource.__gpuresGetMapAddrSpace__ = &__nvoc_thunk_GenericEngineApi_gpuresGetMapAddrSpace;

    pThis->__nvoc_base_GpuResource.__gpuresControl__ = &__nvoc_thunk_GenericEngineApi_gpuresControl;

    pThis->__genapiShareCallback__ = &__nvoc_thunk_GpuResource_genapiShareCallback;

    pThis->__genapiUnmap__ = &__nvoc_thunk_GpuResource_genapiUnmap;

    pThis->__genapiGetMemInterMapParams__ = &__nvoc_thunk_RmResource_genapiGetMemInterMapParams;

    pThis->__genapiGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_genapiGetMemoryMappingDescriptor;

    pThis->__genapiGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_genapiGetInternalObjectHandle;

    pThis->__genapiControlFilter__ = &__nvoc_thunk_RsResource_genapiControlFilter;

    pThis->__genapiAddAdditionalDependants__ = &__nvoc_thunk_RsResource_genapiAddAdditionalDependants;

    pThis->__genapiGetRefCount__ = &__nvoc_thunk_RsResource_genapiGetRefCount;

    pThis->__genapiCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_genapiCheckMemInterUnmap;

    pThis->__genapiMapTo__ = &__nvoc_thunk_RsResource_genapiMapTo;

    pThis->__genapiControl_Prologue__ = &__nvoc_thunk_RmResource_genapiControl_Prologue;

    pThis->__genapiGetRegBaseOffsetAndSize__ = &__nvoc_thunk_GpuResource_genapiGetRegBaseOffsetAndSize;

    pThis->__genapiCanCopy__ = &__nvoc_thunk_RsResource_genapiCanCopy;

    pThis->__genapiInternalControlForward__ = &__nvoc_thunk_GpuResource_genapiInternalControlForward;

    pThis->__genapiPreDestruct__ = &__nvoc_thunk_RsResource_genapiPreDestruct;

    pThis->__genapiUnmapFrom__ = &__nvoc_thunk_RsResource_genapiUnmapFrom;

    pThis->__genapiControl_Epilogue__ = &__nvoc_thunk_RmResource_genapiControl_Epilogue;

    pThis->__genapiControlLookup__ = &__nvoc_thunk_RsResource_genapiControlLookup;

    pThis->__genapiAccessCallback__ = &__nvoc_thunk_RmResource_genapiAccessCallback;
}

void __nvoc_init_funcTable_GenericEngineApi(GenericEngineApi *pThis) {
    __nvoc_init_funcTable_GenericEngineApi_1(pThis);
}

void __nvoc_init_GpuResource(GpuResource*);
void __nvoc_init_GenericEngineApi(GenericEngineApi *pThis) {
    pThis->__nvoc_pbase_GenericEngineApi = pThis;
    pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object;
    pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource;
    pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon;
    pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource;
    pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_GpuResource;
    __nvoc_init_GpuResource(&pThis->__nvoc_base_GpuResource);
    __nvoc_init_funcTable_GenericEngineApi(pThis);
}

NV_STATUS __nvoc_objCreate_GenericEngineApi(GenericEngineApi **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
    NV_STATUS status;
    Object *pParentObj;
    GenericEngineApi *pThis;

    pThis = portMemAllocNonPaged(sizeof(GenericEngineApi));
    if (pThis == NULL) return NV_ERR_NO_MEMORY;

    portMemSet(pThis, 0, sizeof(GenericEngineApi));

    __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_GenericEngineApi);

    if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
    {
        pParentObj = dynamicCast(pParent, Object);
        objAddChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object);
    }
    else
    {
        pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL;
    }

    __nvoc_init_GenericEngineApi(pThis);
    status = __nvoc_ctor_GenericEngineApi(pThis, arg_pCallContext, arg_pParams);
    if (status != NV_OK) goto __nvoc_objCreate_GenericEngineApi_cleanup;

    *ppThis = pThis;
    return NV_OK;

__nvoc_objCreate_GenericEngineApi_cleanup:
    // do not call destructors here since the constructor already called them
    portMemFree(pThis);
    return status;
}
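
/*
 * Usage sketch (illustrative only, not generated code): allocation through
 * the NVOC factory path above. The parent object and parameter blocks are
 * placeholders; real callers are the resource-server allocation paths.
 * Guarded with #if 0 so it is never compiled.
 */
#if 0
static NV_STATUS exampleCreateGenericEngineApi(Dynamic *pParent,
                                               struct CALL_CONTEXT *pCallContext,
                                               struct RS_RES_ALLOC_PARAMS_INTERNAL *pParams)
{
    GenericEngineApi *pApi = NULL;

    // With zero createFlags the object is parented normally: __nvoc_objCreate
    // links it as a child and runs base-class constructors bottom-up.
    NV_STATUS status = __nvoc_objCreate_GenericEngineApi(&pApi, pParent, 0,
                                                         pCallContext, pParams);
    if (status != NV_OK)
        return status; // the factory already freed the partial object

    // On success the func table and RTTI are initialized, so dispatch macros
    // such as genapiControl() may now be invoked on pApi.
    return NV_OK;
}
#endif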

NV_STATUS __nvoc_objCreateDynamic_GenericEngineApi(GenericEngineApi **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
    NV_STATUS status;
    struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *);
    struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *);

    status = __nvoc_objCreate_GenericEngineApi(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams);

    return status;
}

@@ -1,237 +0,0 @@
#ifndef _G_GENERIC_ENGINE_NVOC_H_
#define _G_GENERIC_ENGINE_NVOC_H_
#include "nvoc/runtime.h"

#ifdef __cplusplus
extern "C" {
#endif

/*
 * SPDX-FileCopyrightText: Copyright (c) 2016-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include "g_generic_engine_nvoc.h"

#ifndef _GENERICENGINEAPI_H_
#define _GENERICENGINEAPI_H_

#include "gpu/gpu_resource.h"

/*!
 * RM internal class providing a generic engine API to RM clients (e.g.:
 * GF100_SUBDEVICE_GRAPHICS and GF100_SUBDEVICE_FB). Classes are primarily used
 * for exposing BAR0 mappings and controls.
 */
#ifdef NVOC_GENERIC_ENGINE_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
struct GenericEngineApi {
    const struct NVOC_RTTI *__nvoc_rtti;
    struct GpuResource __nvoc_base_GpuResource;
    struct Object *__nvoc_pbase_Object;
    struct RsResource *__nvoc_pbase_RsResource;
    struct RmResourceCommon *__nvoc_pbase_RmResourceCommon;
    struct RmResource *__nvoc_pbase_RmResource;
    struct GpuResource *__nvoc_pbase_GpuResource;
    struct GenericEngineApi *__nvoc_pbase_GenericEngineApi;
    NV_STATUS (*__genapiMap__)(struct GenericEngineApi *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *);
    NV_STATUS (*__genapiGetMapAddrSpace__)(struct GenericEngineApi *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *);
    NV_STATUS (*__genapiControl__)(struct GenericEngineApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
    NvBool (*__genapiShareCallback__)(struct GenericEngineApi *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *);
    NV_STATUS (*__genapiUnmap__)(struct GenericEngineApi *, struct CALL_CONTEXT *, struct RsCpuMapping *);
    NV_STATUS (*__genapiGetMemInterMapParams__)(struct GenericEngineApi *, RMRES_MEM_INTER_MAP_PARAMS *);
    NV_STATUS (*__genapiGetMemoryMappingDescriptor__)(struct GenericEngineApi *, struct MEMORY_DESCRIPTOR **);
    NvHandle (*__genapiGetInternalObjectHandle__)(struct GenericEngineApi *);
    NV_STATUS (*__genapiControlFilter__)(struct GenericEngineApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
    void (*__genapiAddAdditionalDependants__)(struct RsClient *, struct GenericEngineApi *, RsResourceRef *);
    NvU32 (*__genapiGetRefCount__)(struct GenericEngineApi *);
    NV_STATUS (*__genapiCheckMemInterUnmap__)(struct GenericEngineApi *, NvBool);
    NV_STATUS (*__genapiMapTo__)(struct GenericEngineApi *, RS_RES_MAP_TO_PARAMS *);
    NV_STATUS (*__genapiControl_Prologue__)(struct GenericEngineApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
    NV_STATUS (*__genapiGetRegBaseOffsetAndSize__)(struct GenericEngineApi *, struct OBJGPU *, NvU32 *, NvU32 *);
    NvBool (*__genapiCanCopy__)(struct GenericEngineApi *);
    NV_STATUS (*__genapiInternalControlForward__)(struct GenericEngineApi *, NvU32, void *, NvU32);
    void (*__genapiPreDestruct__)(struct GenericEngineApi *);
    NV_STATUS (*__genapiUnmapFrom__)(struct GenericEngineApi *, RS_RES_UNMAP_FROM_PARAMS *);
    void (*__genapiControl_Epilogue__)(struct GenericEngineApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
    NV_STATUS (*__genapiControlLookup__)(struct GenericEngineApi *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **);
    NvBool (*__genapiAccessCallback__)(struct GenericEngineApi *, struct RsClient *, void *, RsAccessRight);
};

#ifndef __NVOC_CLASS_GenericEngineApi_TYPEDEF__
#define __NVOC_CLASS_GenericEngineApi_TYPEDEF__
typedef struct GenericEngineApi GenericEngineApi;
#endif /* __NVOC_CLASS_GenericEngineApi_TYPEDEF__ */

#ifndef __nvoc_class_id_GenericEngineApi
#define __nvoc_class_id_GenericEngineApi 0x4bc329
#endif /* __nvoc_class_id_GenericEngineApi */

extern const struct NVOC_CLASS_DEF __nvoc_class_def_GenericEngineApi;

#define __staticCast_GenericEngineApi(pThis) \
    ((pThis)->__nvoc_pbase_GenericEngineApi)

#ifdef __nvoc_generic_engine_h_disabled
#define __dynamicCast_GenericEngineApi(pThis) ((GenericEngineApi*)NULL)
#else //__nvoc_generic_engine_h_disabled
#define __dynamicCast_GenericEngineApi(pThis) \
    ((GenericEngineApi*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(GenericEngineApi)))
#endif //__nvoc_generic_engine_h_disabled


NV_STATUS __nvoc_objCreateDynamic_GenericEngineApi(GenericEngineApi**, Dynamic*, NvU32, va_list);

NV_STATUS __nvoc_objCreate_GenericEngineApi(GenericEngineApi**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams);
#define __objCreate_GenericEngineApi(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \
    __nvoc_objCreate_GenericEngineApi((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams)

#define genapiMap(pGenericEngineApi, pCallContext, pParams, pCpuMapping) genapiMap_DISPATCH(pGenericEngineApi, pCallContext, pParams, pCpuMapping)
#define genapiGetMapAddrSpace(pGenericEngineApi, pCallContext, mapFlags, pAddrSpace) genapiGetMapAddrSpace_DISPATCH(pGenericEngineApi, pCallContext, mapFlags, pAddrSpace)
#define genapiControl(pGenericEngineApi, pCallContext, pParams) genapiControl_DISPATCH(pGenericEngineApi, pCallContext, pParams)
#define genapiShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) genapiShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy)
#define genapiUnmap(pGpuResource, pCallContext, pCpuMapping) genapiUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping)
#define genapiGetMemInterMapParams(pRmResource, pParams) genapiGetMemInterMapParams_DISPATCH(pRmResource, pParams)
#define genapiGetMemoryMappingDescriptor(pRmResource, ppMemDesc) genapiGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc)
#define genapiGetInternalObjectHandle(pGpuResource) genapiGetInternalObjectHandle_DISPATCH(pGpuResource)
#define genapiControlFilter(pResource, pCallContext, pParams) genapiControlFilter_DISPATCH(pResource, pCallContext, pParams)
#define genapiAddAdditionalDependants(pClient, pResource, pReference) genapiAddAdditionalDependants_DISPATCH(pClient, pResource, pReference)
#define genapiGetRefCount(pResource) genapiGetRefCount_DISPATCH(pResource)
#define genapiCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) genapiCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided)
#define genapiMapTo(pResource, pParams) genapiMapTo_DISPATCH(pResource, pParams)
#define genapiControl_Prologue(pResource, pCallContext, pParams) genapiControl_Prologue_DISPATCH(pResource, pCallContext, pParams)
#define genapiGetRegBaseOffsetAndSize(pGpuResource, pGpu, pOffset, pSize) genapiGetRegBaseOffsetAndSize_DISPATCH(pGpuResource, pGpu, pOffset, pSize)
#define genapiCanCopy(pResource) genapiCanCopy_DISPATCH(pResource)
#define genapiInternalControlForward(pGpuResource, command, pParams, size) genapiInternalControlForward_DISPATCH(pGpuResource, command, pParams, size)
#define genapiPreDestruct(pResource) genapiPreDestruct_DISPATCH(pResource)
#define genapiUnmapFrom(pResource, pParams) genapiUnmapFrom_DISPATCH(pResource, pParams)
#define genapiControl_Epilogue(pResource, pCallContext, pParams) genapiControl_Epilogue_DISPATCH(pResource, pCallContext, pParams)
#define genapiControlLookup(pResource, pParams, ppEntry) genapiControlLookup_DISPATCH(pResource, pParams, ppEntry)
#define genapiAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) genapiAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight)
NV_STATUS genapiMap_IMPL(struct GenericEngineApi *pGenericEngineApi, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping);

static inline NV_STATUS genapiMap_DISPATCH(struct GenericEngineApi *pGenericEngineApi, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) {
    return pGenericEngineApi->__genapiMap__(pGenericEngineApi, pCallContext, pParams, pCpuMapping);
}

NV_STATUS genapiGetMapAddrSpace_IMPL(struct GenericEngineApi *pGenericEngineApi, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace);

static inline NV_STATUS genapiGetMapAddrSpace_DISPATCH(struct GenericEngineApi *pGenericEngineApi, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) {
    return pGenericEngineApi->__genapiGetMapAddrSpace__(pGenericEngineApi, pCallContext, mapFlags, pAddrSpace);
}

NV_STATUS genapiControl_IMPL(struct GenericEngineApi *pGenericEngineApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams);

static inline NV_STATUS genapiControl_DISPATCH(struct GenericEngineApi *pGenericEngineApi, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return pGenericEngineApi->__genapiControl__(pGenericEngineApi, pCallContext, pParams);
}

static inline NvBool genapiShareCallback_DISPATCH(struct GenericEngineApi *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
    return pGpuResource->__genapiShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy);
}

static inline NV_STATUS genapiUnmap_DISPATCH(struct GenericEngineApi *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) {
    return pGpuResource->__genapiUnmap__(pGpuResource, pCallContext, pCpuMapping);
}

static inline NV_STATUS genapiGetMemInterMapParams_DISPATCH(struct GenericEngineApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
    return pRmResource->__genapiGetMemInterMapParams__(pRmResource, pParams);
}

static inline NV_STATUS genapiGetMemoryMappingDescriptor_DISPATCH(struct GenericEngineApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
    return pRmResource->__genapiGetMemoryMappingDescriptor__(pRmResource, ppMemDesc);
}

static inline NvHandle genapiGetInternalObjectHandle_DISPATCH(struct GenericEngineApi *pGpuResource) {
    return pGpuResource->__genapiGetInternalObjectHandle__(pGpuResource);
}

static inline NV_STATUS genapiControlFilter_DISPATCH(struct GenericEngineApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return pResource->__genapiControlFilter__(pResource, pCallContext, pParams);
}

static inline void genapiAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct GenericEngineApi *pResource, RsResourceRef *pReference) {
    pResource->__genapiAddAdditionalDependants__(pClient, pResource, pReference);
}

static inline NvU32 genapiGetRefCount_DISPATCH(struct GenericEngineApi *pResource) {
    return pResource->__genapiGetRefCount__(pResource);
}

static inline NV_STATUS genapiCheckMemInterUnmap_DISPATCH(struct GenericEngineApi *pRmResource, NvBool bSubdeviceHandleProvided) {
    return pRmResource->__genapiCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided);
}

static inline NV_STATUS genapiMapTo_DISPATCH(struct GenericEngineApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
    return pResource->__genapiMapTo__(pResource, pParams);
}

static inline NV_STATUS genapiControl_Prologue_DISPATCH(struct GenericEngineApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return pResource->__genapiControl_Prologue__(pResource, pCallContext, pParams);
}

static inline NV_STATUS genapiGetRegBaseOffsetAndSize_DISPATCH(struct GenericEngineApi *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) {
    return pGpuResource->__genapiGetRegBaseOffsetAndSize__(pGpuResource, pGpu, pOffset, pSize);
}

static inline NvBool genapiCanCopy_DISPATCH(struct GenericEngineApi *pResource) {
    return pResource->__genapiCanCopy__(pResource);
}

static inline NV_STATUS genapiInternalControlForward_DISPATCH(struct GenericEngineApi *pGpuResource, NvU32 command, void *pParams, NvU32 size) {
    return pGpuResource->__genapiInternalControlForward__(pGpuResource, command, pParams, size);
}

static inline void genapiPreDestruct_DISPATCH(struct GenericEngineApi *pResource) {
    pResource->__genapiPreDestruct__(pResource);
}

static inline NV_STATUS genapiUnmapFrom_DISPATCH(struct GenericEngineApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
    return pResource->__genapiUnmapFrom__(pResource, pParams);
}

static inline void genapiControl_Epilogue_DISPATCH(struct GenericEngineApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    pResource->__genapiControl_Epilogue__(pResource, pCallContext, pParams);
}

static inline NV_STATUS genapiControlLookup_DISPATCH(struct GenericEngineApi *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) {
    return pResource->__genapiControlLookup__(pResource, pParams, ppEntry);
}

static inline NvBool genapiAccessCallback_DISPATCH(struct GenericEngineApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
    return pResource->__genapiAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight);
}

NV_STATUS genapiConstruct_IMPL(struct GenericEngineApi *arg_pGenericEngineApi, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams);
#define __nvoc_genapiConstruct(arg_pGenericEngineApi, arg_pCallContext, arg_pParams) genapiConstruct_IMPL(arg_pGenericEngineApi, arg_pCallContext, arg_pParams)
void genapiDestruct_IMPL(struct GenericEngineApi *pGenericEngineApi);
#define __nvoc_genapiDestruct(pGenericEngineApi) genapiDestruct_IMPL(pGenericEngineApi)
#undef PRIVATE_FIELD


#endif // _GENERICENGINEAPI_H_

#ifdef __cplusplus
} // extern "C"
#endif
#endif // _G_GENERIC_ENGINE_NVOC_H_
@@ -1,59 +0,0 @@
/*
 * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <core/core.h>
#include <gpu/gpu.h>
#include <gpu/eng_desc.h>
#include <g_allclasses.h>



const CLASSDESCRIPTOR *
gpuGetClassDescriptorList_T234D(POBJGPU pGpu, NvU32 *pNumClassDescriptors)
{
    static const CLASSDESCRIPTOR halT234DClassDescriptorList[] = {
        { GF100_HDACODEC,                ENG_HDACODEC       },
        { NV01_MEMORY_SYNCPOINT,         ENG_DMA            },
        { NV04_DISPLAY_COMMON,           ENG_KERNEL_DISPLAY },
        { NVC372_DISPLAY_SW,             ENG_KERNEL_DISPLAY },
        { NVC670_DISPLAY,                ENG_KERNEL_DISPLAY },
        { NVC671_DISP_SF_USER,           ENG_KERNEL_DISPLAY },
        { NVC673_DISP_CAPABILITIES,      ENG_KERNEL_DISPLAY },
        { NVC67A_CURSOR_IMM_CHANNEL_PIO, ENG_KERNEL_DISPLAY },
        { NVC67B_WINDOW_IMM_CHANNEL_DMA, ENG_KERNEL_DISPLAY },
        { NVC67D_CORE_CHANNEL_DMA,       ENG_KERNEL_DISPLAY },
        { NVC67E_WINDOW_CHANNEL_DMA,     ENG_KERNEL_DISPLAY },
        { NVC77F_ANY_CHANNEL_DMA,        ENG_KERNEL_DISPLAY },
    };

#define HALT234D_NUM_CLASS_DESCS (sizeof(halT234DClassDescriptorList) / sizeof(CLASSDESCRIPTOR))

#define HALT234D_NUM_CLASSES 16

    ct_assert(NV0080_CTRL_GPU_CLASSLIST_MAX_SIZE >= HALT234D_NUM_CLASSES);

    *pNumClassDescriptors = HALT234D_NUM_CLASS_DESCS;
    return halT234DClassDescriptorList;
}
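
/*
 * Usage sketch (illustrative only, not from the original file): how a caller
 * might consume the descriptor list returned above. The CLASSDESCRIPTOR field
 * name externalClassId is an assumption about the struct layout; the real
 * consumers live in the GPU class-list control paths. Guarded with #if 0 so
 * it is never compiled.
 */
#if 0
static NvBool exampleClassIsSupported(POBJGPU pGpu, NvU32 classId)
{
    NvU32 numDescs = 0;
    const CLASSDESCRIPTOR *pDescs = gpuGetClassDescriptorList_T234D(pGpu, &numDescs);
    NvU32 i;

    // Linear scan: each descriptor pairs an exported class with the engine
    // that backs it (mostly ENG_KERNEL_DISPLAY on T234D).
    for (i = 0; i < numDescs; i++)
    {
        if (pDescs[i].externalClassId == classId) // assumed field name
            return NV_TRUE;
    }
    return NV_FALSE;
}
#endif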

@@ -1,154 +0,0 @@
#define NVOC_GPU_DB_H_PRIVATE_ACCESS_ALLOWED
#include "nvoc/runtime.h"
#include "nvoc/rtti.h"
#include "nvtypes.h"
#include "nvport/nvport.h"
#include "nvport/inline/util_valist.h"
#include "utils/nvassert.h"
#include "g_gpu_db_nvoc.h"

#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0xcdd250 = 1;
#endif

extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuDb;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;

void __nvoc_init_GpuDb(GpuDb*);
void __nvoc_init_funcTable_GpuDb(GpuDb*);
NV_STATUS __nvoc_ctor_GpuDb(GpuDb*);
void __nvoc_init_dataField_GpuDb(GpuDb*);
void __nvoc_dtor_GpuDb(GpuDb*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_GpuDb;

static const struct NVOC_RTTI __nvoc_rtti_GpuDb_GpuDb = {
    /*pClassDef=*/  &__nvoc_class_def_GpuDb,
    /*dtor=*/       (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_GpuDb,
    /*offset=*/     0,
};

static const struct NVOC_RTTI __nvoc_rtti_GpuDb_Object = {
    /*pClassDef=*/  &__nvoc_class_def_Object,
    /*dtor=*/       &__nvoc_destructFromBase,
    /*offset=*/     NV_OFFSETOF(GpuDb, __nvoc_base_Object),
};

static const struct NVOC_CASTINFO __nvoc_castinfo_GpuDb = {
    /*numRelatives=*/ 2,
    /*relatives=*/ {
        &__nvoc_rtti_GpuDb_GpuDb,
        &__nvoc_rtti_GpuDb_Object,
    },
};

const struct NVOC_CLASS_DEF __nvoc_class_def_GpuDb =
{
    /*classInfo=*/ {
        /*size=*/               sizeof(GpuDb),
        /*classId=*/            classId(GpuDb),
        /*providerId=*/         &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
        /*name=*/               "GpuDb",
#endif
    },
    /*objCreatefn=*/        (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_GpuDb,
    /*pCastInfo=*/          &__nvoc_castinfo_GpuDb,
    /*pExportInfo=*/        &__nvoc_export_info_GpuDb
};

const struct NVOC_EXPORT_INFO __nvoc_export_info_GpuDb =
{
    /*numEntries=*/     0,
    /*pExportEntries=*/ 0
};

void __nvoc_dtor_Object(Object*);
void __nvoc_dtor_GpuDb(GpuDb *pThis) {
    __nvoc_gpudbDestruct(pThis);
    __nvoc_dtor_Object(&pThis->__nvoc_base_Object);
    PORT_UNREFERENCED_VARIABLE(pThis);
}

void __nvoc_init_dataField_GpuDb(GpuDb *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);
}

NV_STATUS __nvoc_ctor_Object(Object* );
NV_STATUS __nvoc_ctor_GpuDb(GpuDb *pThis) {
    NV_STATUS status = NV_OK;
    status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object);
    if (status != NV_OK) goto __nvoc_ctor_GpuDb_fail_Object;
    __nvoc_init_dataField_GpuDb(pThis);

    status = __nvoc_gpudbConstruct(pThis);
    if (status != NV_OK) goto __nvoc_ctor_GpuDb_fail__init;
    goto __nvoc_ctor_GpuDb_exit; // Success

__nvoc_ctor_GpuDb_fail__init:
    __nvoc_dtor_Object(&pThis->__nvoc_base_Object);
__nvoc_ctor_GpuDb_fail_Object:
__nvoc_ctor_GpuDb_exit:

    return status;
}

static void __nvoc_init_funcTable_GpuDb_1(GpuDb *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);
}

void __nvoc_init_funcTable_GpuDb(GpuDb *pThis) {
    __nvoc_init_funcTable_GpuDb_1(pThis);
}

void __nvoc_init_Object(Object*);
void __nvoc_init_GpuDb(GpuDb *pThis) {
    pThis->__nvoc_pbase_GpuDb = pThis;
    pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object;
    __nvoc_init_Object(&pThis->__nvoc_base_Object);
    __nvoc_init_funcTable_GpuDb(pThis);
}

NV_STATUS __nvoc_objCreate_GpuDb(GpuDb **ppThis, Dynamic *pParent, NvU32 createFlags) {
    NV_STATUS status;
    Object *pParentObj;
    GpuDb *pThis;

    pThis = portMemAllocNonPaged(sizeof(GpuDb));
    if (pThis == NULL) return NV_ERR_NO_MEMORY;

    portMemSet(pThis, 0, sizeof(GpuDb));

    __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_GpuDb);

    if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
    {
        pParentObj = dynamicCast(pParent, Object);
        objAddChild(pParentObj, &pThis->__nvoc_base_Object);
    }
    else
    {
        pThis->__nvoc_base_Object.pParent = NULL;
    }

    __nvoc_init_GpuDb(pThis);
    status = __nvoc_ctor_GpuDb(pThis);
    if (status != NV_OK) goto __nvoc_objCreate_GpuDb_cleanup;

    *ppThis = pThis;
    return NV_OK;

__nvoc_objCreate_GpuDb_cleanup:
    // do not call destructors here since the constructor already called them
    portMemFree(pThis);
    return status;
}

NV_STATUS __nvoc_objCreateDynamic_GpuDb(GpuDb **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
    NV_STATUS status;

    status = __nvoc_objCreate_GpuDb(ppThis, pParent, createFlags);

    return status;
}

@@ -1,148 +0,0 @@
#define NVOC_GPU_GROUP_H_PRIVATE_ACCESS_ALLOWED
#include "nvoc/runtime.h"
#include "nvoc/rtti.h"
#include "nvtypes.h"
#include "nvport/nvport.h"
#include "nvport/inline/util_valist.h"
#include "utils/nvassert.h"
#include "g_gpu_group_nvoc.h"

#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0xe40531 = 1;
#endif

extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJGPUGRP;
extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;

void __nvoc_init_OBJGPUGRP(OBJGPUGRP*);
void __nvoc_init_funcTable_OBJGPUGRP(OBJGPUGRP*);
NV_STATUS __nvoc_ctor_OBJGPUGRP(OBJGPUGRP*);
void __nvoc_init_dataField_OBJGPUGRP(OBJGPUGRP*);
void __nvoc_dtor_OBJGPUGRP(OBJGPUGRP*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJGPUGRP;

static const struct NVOC_RTTI __nvoc_rtti_OBJGPUGRP_OBJGPUGRP = {
    /*pClassDef=*/  &__nvoc_class_def_OBJGPUGRP,
    /*dtor=*/       (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJGPUGRP,
    /*offset=*/     0,
};

static const struct NVOC_RTTI __nvoc_rtti_OBJGPUGRP_Object = {
    /*pClassDef=*/  &__nvoc_class_def_Object,
    /*dtor=*/       &__nvoc_destructFromBase,
    /*offset=*/     NV_OFFSETOF(OBJGPUGRP, __nvoc_base_Object),
};

static const struct NVOC_CASTINFO __nvoc_castinfo_OBJGPUGRP = {
    /*numRelatives=*/ 2,
    /*relatives=*/ {
        &__nvoc_rtti_OBJGPUGRP_OBJGPUGRP,
        &__nvoc_rtti_OBJGPUGRP_Object,
    },
};

const struct NVOC_CLASS_DEF __nvoc_class_def_OBJGPUGRP =
{
    /*classInfo=*/ {
        /*size=*/               sizeof(OBJGPUGRP),
        /*classId=*/            classId(OBJGPUGRP),
        /*providerId=*/         &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
        /*name=*/               "OBJGPUGRP",
#endif
    },
    /*objCreatefn=*/        (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_OBJGPUGRP,
    /*pCastInfo=*/          &__nvoc_castinfo_OBJGPUGRP,
    /*pExportInfo=*/        &__nvoc_export_info_OBJGPUGRP
};

const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJGPUGRP =
{
    /*numEntries=*/     0,
    /*pExportEntries=*/ 0
};

void __nvoc_dtor_Object(Object*);
void __nvoc_dtor_OBJGPUGRP(OBJGPUGRP *pThis) {
    __nvoc_dtor_Object(&pThis->__nvoc_base_Object);
    PORT_UNREFERENCED_VARIABLE(pThis);
}

void __nvoc_init_dataField_OBJGPUGRP(OBJGPUGRP *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);
}

NV_STATUS __nvoc_ctor_Object(Object* );
NV_STATUS __nvoc_ctor_OBJGPUGRP(OBJGPUGRP *pThis) {
    NV_STATUS status = NV_OK;
    status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object);
    if (status != NV_OK) goto __nvoc_ctor_OBJGPUGRP_fail_Object;
    __nvoc_init_dataField_OBJGPUGRP(pThis);
    goto __nvoc_ctor_OBJGPUGRP_exit; // Success

__nvoc_ctor_OBJGPUGRP_fail_Object:
__nvoc_ctor_OBJGPUGRP_exit:

    return status;
}

static void __nvoc_init_funcTable_OBJGPUGRP_1(OBJGPUGRP *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);
}

void __nvoc_init_funcTable_OBJGPUGRP(OBJGPUGRP *pThis) {
    __nvoc_init_funcTable_OBJGPUGRP_1(pThis);
}

void __nvoc_init_Object(Object*);
void __nvoc_init_OBJGPUGRP(OBJGPUGRP *pThis) {
    pThis->__nvoc_pbase_OBJGPUGRP = pThis;
    pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object;
    __nvoc_init_Object(&pThis->__nvoc_base_Object);
    __nvoc_init_funcTable_OBJGPUGRP(pThis);
}

NV_STATUS __nvoc_objCreate_OBJGPUGRP(OBJGPUGRP **ppThis, Dynamic *pParent, NvU32 createFlags) {
    NV_STATUS status;
    Object *pParentObj;
    OBJGPUGRP *pThis;

    pThis = portMemAllocNonPaged(sizeof(OBJGPUGRP));
    if (pThis == NULL) return NV_ERR_NO_MEMORY;

    portMemSet(pThis, 0, sizeof(OBJGPUGRP));

    __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_OBJGPUGRP);

    if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
    {
        pParentObj = dynamicCast(pParent, Object);
        objAddChild(pParentObj, &pThis->__nvoc_base_Object);
    }
    else
    {
        pThis->__nvoc_base_Object.pParent = NULL;
    }

    __nvoc_init_OBJGPUGRP(pThis);
    status = __nvoc_ctor_OBJGPUGRP(pThis);
    if (status != NV_OK) goto __nvoc_objCreate_OBJGPUGRP_cleanup;

    *ppThis = pThis;
    return NV_OK;

__nvoc_objCreate_OBJGPUGRP_cleanup:
    // do not call destructors here since the constructor already called them
    portMemFree(pThis);
    return status;
}

NV_STATUS __nvoc_objCreateDynamic_OBJGPUGRP(OBJGPUGRP **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
    NV_STATUS status;

    status = __nvoc_objCreate_OBJGPUGRP(ppThis, pParent, createFlags);

    return status;
}

@@ -1,97 +0,0 @@
#define NVOC_GPU_HALSPEC_H_PRIVATE_ACCESS_ALLOWED
#include "nvoc/runtime.h"
#include "nvoc/rtti.h"
#include "nvtypes.h"
#include "nvport/nvport.h"
#include "nvport/inline/util_valist.h"
#include "utils/nvassert.h"
#include "g_gpu_halspec_nvoc.h"

#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0x34a6d6 = 1;
#endif

extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmHalspecOwner;

void __nvoc_init_RmHalspecOwner(RmHalspecOwner*,
        NvU32 ChipHal_arch, NvU32 ChipHal_impl, NvU32 ChipHal_hidrev,
        RM_RUNTIME_VARIANT RmVariantHal_rmVariant,
        NvU32 DispIpHal_ipver);
void __nvoc_init_funcTable_RmHalspecOwner(RmHalspecOwner*);
NV_STATUS __nvoc_ctor_RmHalspecOwner(RmHalspecOwner*);
void __nvoc_init_dataField_RmHalspecOwner(RmHalspecOwner*);
void __nvoc_dtor_RmHalspecOwner(RmHalspecOwner*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_RmHalspecOwner;

static const struct NVOC_RTTI __nvoc_rtti_RmHalspecOwner_RmHalspecOwner = {
    /*pClassDef=*/  &__nvoc_class_def_RmHalspecOwner,
    /*dtor=*/       (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_RmHalspecOwner,
    /*offset=*/     0,
};

static const struct NVOC_CASTINFO __nvoc_castinfo_RmHalspecOwner = {
    /*numRelatives=*/ 1,
    /*relatives=*/ {
        &__nvoc_rtti_RmHalspecOwner_RmHalspecOwner,
    },
};

// Not instantiable because it's not derived from class "Object"
const struct NVOC_CLASS_DEF __nvoc_class_def_RmHalspecOwner =
{
    /*classInfo=*/ {
        /*size=*/               sizeof(RmHalspecOwner),
        /*classId=*/            classId(RmHalspecOwner),
        /*providerId=*/         &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
        /*name=*/               "RmHalspecOwner",
#endif
    },
    /*objCreatefn=*/        (NVOC_DYNAMIC_OBJ_CREATE) NULL,
    /*pCastInfo=*/          &__nvoc_castinfo_RmHalspecOwner,
    /*pExportInfo=*/        &__nvoc_export_info_RmHalspecOwner
};

const struct NVOC_EXPORT_INFO __nvoc_export_info_RmHalspecOwner =
{
    /*numEntries=*/     0,
    /*pExportEntries=*/ 0
};

void __nvoc_dtor_RmHalspecOwner(RmHalspecOwner *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);
}

void __nvoc_init_dataField_RmHalspecOwner(RmHalspecOwner *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);
}

NV_STATUS __nvoc_ctor_RmHalspecOwner(RmHalspecOwner *pThis) {
    NV_STATUS status = NV_OK;
    __nvoc_init_dataField_RmHalspecOwner(pThis);
    goto __nvoc_ctor_RmHalspecOwner_exit; // Success

__nvoc_ctor_RmHalspecOwner_exit:

    return status;
}

static void __nvoc_init_funcTable_RmHalspecOwner_1(RmHalspecOwner *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);
}

void __nvoc_init_funcTable_RmHalspecOwner(RmHalspecOwner *pThis) {
    __nvoc_init_funcTable_RmHalspecOwner_1(pThis);
}

void __nvoc_init_RmHalspecOwner(RmHalspecOwner *pThis,
        NvU32 ChipHal_arch, NvU32 ChipHal_impl, NvU32 ChipHal_hidrev,
        RM_RUNTIME_VARIANT RmVariantHal_rmVariant,
        NvU32 DispIpHal_ipver) {
    pThis->__nvoc_pbase_RmHalspecOwner = pThis;
    __nvoc_init_halspec_ChipHal(&pThis->chipHal, ChipHal_arch, ChipHal_impl, ChipHal_hidrev);
    __nvoc_init_halspec_RmVariantHal(&pThis->rmVariantHal, RmVariantHal_rmVariant);
    __nvoc_init_halspec_DispIpHal(&pThis->dispIpHal, DispIpHal_ipver);
    __nvoc_init_funcTable_RmHalspecOwner(pThis);
}
|
||||
|
||||
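
/*
 * Reviewer note (not generated output): the __nvoc_init_halspec_*() calls
 * above translate the chip arch/impl/hidrev, RM runtime variant, and display
 * IP version into per-halspec variant indices. Those indices are what the
 * generated bitmask tests elsewhere in this diff (e.g. in
 * __nvoc_init_dataField_OBJGPU) key off to pick variant-specific defaults.
 */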
@@ -1,91 +0,0 @@
#ifndef _G_GPU_HALSPEC_NVOC_H_
#define _G_GPU_HALSPEC_NVOC_H_
#include "nvoc/runtime.h"

#ifdef __cplusplus
extern "C" {
#endif

/*
 * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "g_gpu_halspec_nvoc.h"

#ifndef GPU_HALSPEC_H
#define GPU_HALSPEC_H

#include "g_chips2halspec.h" // NVOC halspec, generated by rmconfig.pl

#ifdef NVOC_GPU_HALSPEC_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
struct RmHalspecOwner {
    const struct NVOC_RTTI *__nvoc_rtti;
    struct RmHalspecOwner *__nvoc_pbase_RmHalspecOwner;
    struct ChipHal chipHal;
    struct RmVariantHal rmVariantHal;
    struct DispIpHal dispIpHal;
};

#ifndef __NVOC_CLASS_RmHalspecOwner_TYPEDEF__
#define __NVOC_CLASS_RmHalspecOwner_TYPEDEF__
typedef struct RmHalspecOwner RmHalspecOwner;
#endif /* __NVOC_CLASS_RmHalspecOwner_TYPEDEF__ */

#ifndef __nvoc_class_id_RmHalspecOwner
#define __nvoc_class_id_RmHalspecOwner 0x34a6d6
#endif /* __nvoc_class_id_RmHalspecOwner */

extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmHalspecOwner;

#define __staticCast_RmHalspecOwner(pThis) \
    ((pThis)->__nvoc_pbase_RmHalspecOwner)

#ifdef __nvoc_gpu_halspec_h_disabled
#define __dynamicCast_RmHalspecOwner(pThis) ((RmHalspecOwner*)NULL)
#else //__nvoc_gpu_halspec_h_disabled
#define __dynamicCast_RmHalspecOwner(pThis) \
    ((RmHalspecOwner*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(RmHalspecOwner)))
#endif //__nvoc_gpu_halspec_h_disabled


NV_STATUS __nvoc_objCreateDynamic_RmHalspecOwner(RmHalspecOwner**, Dynamic*, NvU32, va_list);

NV_STATUS __nvoc_objCreate_RmHalspecOwner(RmHalspecOwner**, Dynamic*, NvU32,
        NvU32 ChipHal_arch, NvU32 ChipHal_impl, NvU32 ChipHal_hidrev,
        RM_RUNTIME_VARIANT RmVariantHal_rmVariant,
        NvU32 DispIpHal_ipver);
#define __objCreate_RmHalspecOwner(ppNewObj, pParent, createFlags, ChipHal_arch, ChipHal_impl, ChipHal_hidrev, RmVariantHal_rmVariant, DispIpHal_ipver) \
    __nvoc_objCreate_RmHalspecOwner((ppNewObj), staticCast((pParent), Dynamic), (createFlags), ChipHal_arch, ChipHal_impl, ChipHal_hidrev, RmVariantHal_rmVariant, DispIpHal_ipver)

#undef PRIVATE_FIELD


#endif // GPU_HALSPEC_H

#ifdef __cplusplus
} // extern "C"
#endif
#endif // _G_GPU_HALSPEC_NVOC_H_
@@ -1,322 +0,0 @@
#define NVOC_GPU_MGMT_API_H_PRIVATE_ACCESS_ALLOWED
#include "nvoc/runtime.h"
#include "nvoc/rtti.h"
#include "nvtypes.h"
#include "nvport/nvport.h"
#include "nvport/inline/util_valist.h"
#include "utils/nvassert.h"
#include "g_gpu_mgmt_api_nvoc.h"

#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0x376305 = 1;
#endif

extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuManagementApi;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource;

void __nvoc_init_GpuManagementApi(GpuManagementApi*);
void __nvoc_init_funcTable_GpuManagementApi(GpuManagementApi*);
NV_STATUS __nvoc_ctor_GpuManagementApi(GpuManagementApi*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams);
void __nvoc_init_dataField_GpuManagementApi(GpuManagementApi*);
void __nvoc_dtor_GpuManagementApi(GpuManagementApi*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_GpuManagementApi;

static const struct NVOC_RTTI __nvoc_rtti_GpuManagementApi_GpuManagementApi = {
    /*pClassDef=*/ &__nvoc_class_def_GpuManagementApi,
    /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_GpuManagementApi,
    /*offset=*/ 0,
};

static const struct NVOC_RTTI __nvoc_rtti_GpuManagementApi_Object = {
    /*pClassDef=*/ &__nvoc_class_def_Object,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(GpuManagementApi, __nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object),
};

static const struct NVOC_RTTI __nvoc_rtti_GpuManagementApi_RsResource = {
    /*pClassDef=*/ &__nvoc_class_def_RsResource,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(GpuManagementApi, __nvoc_base_RmResource.__nvoc_base_RsResource),
};

static const struct NVOC_RTTI __nvoc_rtti_GpuManagementApi_RmResourceCommon = {
    /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(GpuManagementApi, __nvoc_base_RmResource.__nvoc_base_RmResourceCommon),
};

static const struct NVOC_RTTI __nvoc_rtti_GpuManagementApi_RmResource = {
    /*pClassDef=*/ &__nvoc_class_def_RmResource,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(GpuManagementApi, __nvoc_base_RmResource),
};

static const struct NVOC_CASTINFO __nvoc_castinfo_GpuManagementApi = {
    /*numRelatives=*/ 5,
    /*relatives=*/ {
        &__nvoc_rtti_GpuManagementApi_GpuManagementApi,
        &__nvoc_rtti_GpuManagementApi_RmResource,
        &__nvoc_rtti_GpuManagementApi_RmResourceCommon,
        &__nvoc_rtti_GpuManagementApi_RsResource,
        &__nvoc_rtti_GpuManagementApi_Object,
    },
};

const struct NVOC_CLASS_DEF __nvoc_class_def_GpuManagementApi =
{
    /*classInfo=*/ {
        /*size=*/ sizeof(GpuManagementApi),
        /*classId=*/ classId(GpuManagementApi),
        /*providerId=*/ &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
        /*name=*/ "GpuManagementApi",
#endif
    },
    /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_GpuManagementApi,
    /*pCastInfo=*/ &__nvoc_castinfo_GpuManagementApi,
    /*pExportInfo=*/ &__nvoc_export_info_GpuManagementApi
};
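
/*
 * Reviewer note (not generated output): the thunks below are how NVOC
 * emulates virtual dispatch in C. Each one shifts the object pointer by the
 * base-class offset recorded in the RTTI entries above, then forwards the
 * call to the corresponding base-class method.
 */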
static NvBool __nvoc_thunk_RmResource_gpumgmtapiShareCallback(struct GpuManagementApi *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
    return rmresShareCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuManagementApi_RmResource.offset), pInvokingClient, pParentRef, pSharePolicy);
}

static NV_STATUS __nvoc_thunk_RmResource_gpumgmtapiCheckMemInterUnmap(struct GpuManagementApi *pRmResource, NvBool bSubdeviceHandleProvided) {
    return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_GpuManagementApi_RmResource.offset), bSubdeviceHandleProvided);
}

static NV_STATUS __nvoc_thunk_RsResource_gpumgmtapiControl(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return resControl((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuManagementApi_RsResource.offset), pCallContext, pParams);
}

static NV_STATUS __nvoc_thunk_RmResource_gpumgmtapiGetMemInterMapParams(struct GpuManagementApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
    return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_GpuManagementApi_RmResource.offset), pParams);
}

static NV_STATUS __nvoc_thunk_RmResource_gpumgmtapiGetMemoryMappingDescriptor(struct GpuManagementApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
    return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_GpuManagementApi_RmResource.offset), ppMemDesc);
}

static NvU32 __nvoc_thunk_RsResource_gpumgmtapiGetRefCount(struct GpuManagementApi *pResource) {
    return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuManagementApi_RsResource.offset));
}

static NV_STATUS __nvoc_thunk_RsResource_gpumgmtapiControlFilter(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuManagementApi_RsResource.offset), pCallContext, pParams);
}

static void __nvoc_thunk_RsResource_gpumgmtapiAddAdditionalDependants(struct RsClient *pClient, struct GpuManagementApi *pResource, RsResourceRef *pReference) {
    resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuManagementApi_RsResource.offset), pReference);
}

static NV_STATUS __nvoc_thunk_RsResource_gpumgmtapiUnmap(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) {
    return resUnmap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuManagementApi_RsResource.offset), pCallContext, pCpuMapping);
}

static NV_STATUS __nvoc_thunk_RmResource_gpumgmtapiControl_Prologue(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuManagementApi_RmResource.offset), pCallContext, pParams);
}

static NvBool __nvoc_thunk_RsResource_gpumgmtapiCanCopy(struct GpuManagementApi *pResource) {
    return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuManagementApi_RsResource.offset));
}

static NV_STATUS __nvoc_thunk_RsResource_gpumgmtapiMapTo(struct GpuManagementApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
    return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuManagementApi_RsResource.offset), pParams);
}

static void __nvoc_thunk_RsResource_gpumgmtapiPreDestruct(struct GpuManagementApi *pResource) {
    resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuManagementApi_RsResource.offset));
}

static NV_STATUS __nvoc_thunk_RsResource_gpumgmtapiUnmapFrom(struct GpuManagementApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
    return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuManagementApi_RsResource.offset), pParams);
}

static void __nvoc_thunk_RmResource_gpumgmtapiControl_Epilogue(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuManagementApi_RmResource.offset), pCallContext, pParams);
}

static NV_STATUS __nvoc_thunk_RsResource_gpumgmtapiControlLookup(struct GpuManagementApi *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) {
    return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuManagementApi_RsResource.offset), pParams, ppEntry);
}

static NV_STATUS __nvoc_thunk_RsResource_gpumgmtapiMap(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) {
    return resMap((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuManagementApi_RsResource.offset), pCallContext, pParams, pCpuMapping);
}

static NvBool __nvoc_thunk_RmResource_gpumgmtapiAccessCallback(struct GpuManagementApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
    return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuManagementApi_RmResource.offset), pInvokingClient, pAllocParams, accessRight);
}

#if !defined(NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG)
#define NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(x) (0)
#endif

static const struct NVOC_EXPORTED_METHOD_DEF __nvoc_exported_method_def_GpuManagementApi[] =
{
    { /* [0] */
#if NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x7u)
        /*pFunc=*/ (void (*)(void)) NULL,
#else
        /*pFunc=*/ (void (*)(void)) gpumgmtapiCtrlCmdSetShutdownState_IMPL,
#endif // NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x7u)
        /*flags=*/ 0x7u,
        /*accessRight=*/0x0u,
        /*methodId=*/ 0x200101u,
        /*paramSize=*/ sizeof(NV0020_CTRL_GPU_MGMT_SET_SHUTDOWN_STATE_PARAMS),
        /*pClassInfo=*/ &(__nvoc_class_def_GpuManagementApi.classInfo),
#if NV_PRINTF_STRINGS_ALLOWED
        /*func=*/ "gpumgmtapiCtrlCmdSetShutdownState"
#endif
    },

};

const struct NVOC_EXPORT_INFO __nvoc_export_info_GpuManagementApi =
{
    /*numEntries=*/ 1,
    /*pExportEntries=*/ __nvoc_exported_method_def_GpuManagementApi
};

void __nvoc_dtor_RmResource(RmResource*);
void __nvoc_dtor_GpuManagementApi(GpuManagementApi *pThis) {
    __nvoc_gpumgmtapiDestruct(pThis);
    __nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource);
    PORT_UNREFERENCED_VARIABLE(pThis);
}

void __nvoc_init_dataField_GpuManagementApi(GpuManagementApi *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);
}

NV_STATUS __nvoc_ctor_RmResource(RmResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
NV_STATUS __nvoc_ctor_GpuManagementApi(GpuManagementApi *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
    NV_STATUS status = NV_OK;
    status = __nvoc_ctor_RmResource(&pThis->__nvoc_base_RmResource, arg_pCallContext, arg_pParams);
    if (status != NV_OK) goto __nvoc_ctor_GpuManagementApi_fail_RmResource;
    __nvoc_init_dataField_GpuManagementApi(pThis);

    status = __nvoc_gpumgmtapiConstruct(pThis, arg_pCallContext, arg_pParams);
    if (status != NV_OK) goto __nvoc_ctor_GpuManagementApi_fail__init;
    goto __nvoc_ctor_GpuManagementApi_exit; // Success

__nvoc_ctor_GpuManagementApi_fail__init:
    __nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource);
__nvoc_ctor_GpuManagementApi_fail_RmResource:
__nvoc_ctor_GpuManagementApi_exit:

    return status;
}
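
/*
 * Reviewer note (not generated output): the label chain above is the
 * generated error-unwind path. Each base-class constructor that succeeded
 * before a later failure is destructed in reverse order, so a partially
 * constructed GpuManagementApi never escapes.
 */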
static void __nvoc_init_funcTable_GpuManagementApi_1(GpuManagementApi *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);

#if !NVOC_EXPORTED_METHOD_DISABLED_BY_FLAG(0x7u)
    pThis->__gpumgmtapiCtrlCmdSetShutdownState__ = &gpumgmtapiCtrlCmdSetShutdownState_IMPL;
#endif

    pThis->__gpumgmtapiShareCallback__ = &__nvoc_thunk_RmResource_gpumgmtapiShareCallback;

    pThis->__gpumgmtapiCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_gpumgmtapiCheckMemInterUnmap;

    pThis->__gpumgmtapiControl__ = &__nvoc_thunk_RsResource_gpumgmtapiControl;

    pThis->__gpumgmtapiGetMemInterMapParams__ = &__nvoc_thunk_RmResource_gpumgmtapiGetMemInterMapParams;

    pThis->__gpumgmtapiGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_gpumgmtapiGetMemoryMappingDescriptor;

    pThis->__gpumgmtapiGetRefCount__ = &__nvoc_thunk_RsResource_gpumgmtapiGetRefCount;

    pThis->__gpumgmtapiControlFilter__ = &__nvoc_thunk_RsResource_gpumgmtapiControlFilter;

    pThis->__gpumgmtapiAddAdditionalDependants__ = &__nvoc_thunk_RsResource_gpumgmtapiAddAdditionalDependants;

    pThis->__gpumgmtapiUnmap__ = &__nvoc_thunk_RsResource_gpumgmtapiUnmap;

    pThis->__gpumgmtapiControl_Prologue__ = &__nvoc_thunk_RmResource_gpumgmtapiControl_Prologue;

    pThis->__gpumgmtapiCanCopy__ = &__nvoc_thunk_RsResource_gpumgmtapiCanCopy;

    pThis->__gpumgmtapiMapTo__ = &__nvoc_thunk_RsResource_gpumgmtapiMapTo;

    pThis->__gpumgmtapiPreDestruct__ = &__nvoc_thunk_RsResource_gpumgmtapiPreDestruct;

    pThis->__gpumgmtapiUnmapFrom__ = &__nvoc_thunk_RsResource_gpumgmtapiUnmapFrom;

    pThis->__gpumgmtapiControl_Epilogue__ = &__nvoc_thunk_RmResource_gpumgmtapiControl_Epilogue;

    pThis->__gpumgmtapiControlLookup__ = &__nvoc_thunk_RsResource_gpumgmtapiControlLookup;

    pThis->__gpumgmtapiMap__ = &__nvoc_thunk_RsResource_gpumgmtapiMap;

    pThis->__gpumgmtapiAccessCallback__ = &__nvoc_thunk_RmResource_gpumgmtapiAccessCallback;
}

void __nvoc_init_funcTable_GpuManagementApi(GpuManagementApi *pThis) {
    __nvoc_init_funcTable_GpuManagementApi_1(pThis);
}

void __nvoc_init_RmResource(RmResource*);
void __nvoc_init_GpuManagementApi(GpuManagementApi *pThis) {
    pThis->__nvoc_pbase_GpuManagementApi = pThis;
    pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object;
    pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource;
    pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_RmResource.__nvoc_base_RmResourceCommon;
    pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_RmResource;
    __nvoc_init_RmResource(&pThis->__nvoc_base_RmResource);
    __nvoc_init_funcTable_GpuManagementApi(pThis);
}

NV_STATUS __nvoc_objCreate_GpuManagementApi(GpuManagementApi **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
    NV_STATUS status;
    Object *pParentObj;
    GpuManagementApi *pThis;

    pThis = portMemAllocNonPaged(sizeof(GpuManagementApi));
    if (pThis == NULL) return NV_ERR_NO_MEMORY;

    portMemSet(pThis, 0, sizeof(GpuManagementApi));

    __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_GpuManagementApi);

    if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
    {
        pParentObj = dynamicCast(pParent, Object);
        objAddChild(pParentObj, &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object);
    }
    else
    {
        pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL;
    }

    __nvoc_init_GpuManagementApi(pThis);
    status = __nvoc_ctor_GpuManagementApi(pThis, arg_pCallContext, arg_pParams);
    if (status != NV_OK) goto __nvoc_objCreate_GpuManagementApi_cleanup;

    *ppThis = pThis;
    return NV_OK;

__nvoc_objCreate_GpuManagementApi_cleanup:
    // do not call destructors here since the constructor already called them
    portMemFree(pThis);
    return status;
}

NV_STATUS __nvoc_objCreateDynamic_GpuManagementApi(GpuManagementApi **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
    NV_STATUS status;
    struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *);
    struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *);

    status = __nvoc_objCreate_GpuManagementApi(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams);

    return status;
}
@@ -1,221 +0,0 @@
#ifndef _G_GPU_MGMT_API_NVOC_H_
#define _G_GPU_MGMT_API_NVOC_H_
#include "nvoc/runtime.h"

#ifdef __cplusplus
extern "C" {
#endif

/*
 * SPDX-FileCopyrightText: Copyright (c) 2020-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include "g_gpu_mgmt_api_nvoc.h"

#ifndef GPU_MGMT_API_H
#define GPU_MGMT_API_H

#include "rmapi/resource.h"
#include "ctrl/ctrl0020.h"

// ****************************************************************************
// Type Definitions
// ****************************************************************************

//
// GpuManagementApi class information
//
// This is a global GPU class that will help us route IOCTLs to probed
// and persistent GPU state
//

#ifdef NVOC_GPU_MGMT_API_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
struct GpuManagementApi {
    const struct NVOC_RTTI *__nvoc_rtti;
    struct RmResource __nvoc_base_RmResource;
    struct Object *__nvoc_pbase_Object;
    struct RsResource *__nvoc_pbase_RsResource;
    struct RmResourceCommon *__nvoc_pbase_RmResourceCommon;
    struct RmResource *__nvoc_pbase_RmResource;
    struct GpuManagementApi *__nvoc_pbase_GpuManagementApi;
    NV_STATUS (*__gpumgmtapiCtrlCmdSetShutdownState__)(struct GpuManagementApi *, NV0020_CTRL_GPU_MGMT_SET_SHUTDOWN_STATE_PARAMS *);
    NvBool (*__gpumgmtapiShareCallback__)(struct GpuManagementApi *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *);
    NV_STATUS (*__gpumgmtapiCheckMemInterUnmap__)(struct GpuManagementApi *, NvBool);
    NV_STATUS (*__gpumgmtapiControl__)(struct GpuManagementApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
    NV_STATUS (*__gpumgmtapiGetMemInterMapParams__)(struct GpuManagementApi *, RMRES_MEM_INTER_MAP_PARAMS *);
    NV_STATUS (*__gpumgmtapiGetMemoryMappingDescriptor__)(struct GpuManagementApi *, struct MEMORY_DESCRIPTOR **);
    NvU32 (*__gpumgmtapiGetRefCount__)(struct GpuManagementApi *);
    NV_STATUS (*__gpumgmtapiControlFilter__)(struct GpuManagementApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
    void (*__gpumgmtapiAddAdditionalDependants__)(struct RsClient *, struct GpuManagementApi *, RsResourceRef *);
    NV_STATUS (*__gpumgmtapiUnmap__)(struct GpuManagementApi *, struct CALL_CONTEXT *, RsCpuMapping *);
    NV_STATUS (*__gpumgmtapiControl_Prologue__)(struct GpuManagementApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
    NvBool (*__gpumgmtapiCanCopy__)(struct GpuManagementApi *);
    NV_STATUS (*__gpumgmtapiMapTo__)(struct GpuManagementApi *, RS_RES_MAP_TO_PARAMS *);
    void (*__gpumgmtapiPreDestruct__)(struct GpuManagementApi *);
    NV_STATUS (*__gpumgmtapiUnmapFrom__)(struct GpuManagementApi *, RS_RES_UNMAP_FROM_PARAMS *);
    void (*__gpumgmtapiControl_Epilogue__)(struct GpuManagementApi *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
    NV_STATUS (*__gpumgmtapiControlLookup__)(struct GpuManagementApi *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **);
    NV_STATUS (*__gpumgmtapiMap__)(struct GpuManagementApi *, struct CALL_CONTEXT *, RS_CPU_MAP_PARAMS *, RsCpuMapping *);
    NvBool (*__gpumgmtapiAccessCallback__)(struct GpuManagementApi *, struct RsClient *, void *, RsAccessRight);
};

#ifndef __NVOC_CLASS_GpuManagementApi_TYPEDEF__
#define __NVOC_CLASS_GpuManagementApi_TYPEDEF__
typedef struct GpuManagementApi GpuManagementApi;
#endif /* __NVOC_CLASS_GpuManagementApi_TYPEDEF__ */

#ifndef __nvoc_class_id_GpuManagementApi
#define __nvoc_class_id_GpuManagementApi 0x376305
#endif /* __nvoc_class_id_GpuManagementApi */

extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuManagementApi;

#define __staticCast_GpuManagementApi(pThis) \
    ((pThis)->__nvoc_pbase_GpuManagementApi)

#ifdef __nvoc_gpu_mgmt_api_h_disabled
#define __dynamicCast_GpuManagementApi(pThis) ((GpuManagementApi*)NULL)
#else //__nvoc_gpu_mgmt_api_h_disabled
#define __dynamicCast_GpuManagementApi(pThis) \
    ((GpuManagementApi*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(GpuManagementApi)))
#endif //__nvoc_gpu_mgmt_api_h_disabled


NV_STATUS __nvoc_objCreateDynamic_GpuManagementApi(GpuManagementApi**, Dynamic*, NvU32, va_list);

NV_STATUS __nvoc_objCreate_GpuManagementApi(GpuManagementApi**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams);
#define __objCreate_GpuManagementApi(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \
    __nvoc_objCreate_GpuManagementApi((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams)

#define gpumgmtapiCtrlCmdSetShutdownState(pGpuMgmt, pParams) gpumgmtapiCtrlCmdSetShutdownState_DISPATCH(pGpuMgmt, pParams)
#define gpumgmtapiShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) gpumgmtapiShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy)
#define gpumgmtapiCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) gpumgmtapiCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided)
#define gpumgmtapiControl(pResource, pCallContext, pParams) gpumgmtapiControl_DISPATCH(pResource, pCallContext, pParams)
#define gpumgmtapiGetMemInterMapParams(pRmResource, pParams) gpumgmtapiGetMemInterMapParams_DISPATCH(pRmResource, pParams)
#define gpumgmtapiGetMemoryMappingDescriptor(pRmResource, ppMemDesc) gpumgmtapiGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc)
#define gpumgmtapiGetRefCount(pResource) gpumgmtapiGetRefCount_DISPATCH(pResource)
#define gpumgmtapiControlFilter(pResource, pCallContext, pParams) gpumgmtapiControlFilter_DISPATCH(pResource, pCallContext, pParams)
#define gpumgmtapiAddAdditionalDependants(pClient, pResource, pReference) gpumgmtapiAddAdditionalDependants_DISPATCH(pClient, pResource, pReference)
#define gpumgmtapiUnmap(pResource, pCallContext, pCpuMapping) gpumgmtapiUnmap_DISPATCH(pResource, pCallContext, pCpuMapping)
#define gpumgmtapiControl_Prologue(pResource, pCallContext, pParams) gpumgmtapiControl_Prologue_DISPATCH(pResource, pCallContext, pParams)
#define gpumgmtapiCanCopy(pResource) gpumgmtapiCanCopy_DISPATCH(pResource)
#define gpumgmtapiMapTo(pResource, pParams) gpumgmtapiMapTo_DISPATCH(pResource, pParams)
#define gpumgmtapiPreDestruct(pResource) gpumgmtapiPreDestruct_DISPATCH(pResource)
#define gpumgmtapiUnmapFrom(pResource, pParams) gpumgmtapiUnmapFrom_DISPATCH(pResource, pParams)
#define gpumgmtapiControl_Epilogue(pResource, pCallContext, pParams) gpumgmtapiControl_Epilogue_DISPATCH(pResource, pCallContext, pParams)
#define gpumgmtapiControlLookup(pResource, pParams, ppEntry) gpumgmtapiControlLookup_DISPATCH(pResource, pParams, ppEntry)
#define gpumgmtapiMap(pResource, pCallContext, pParams, pCpuMapping) gpumgmtapiMap_DISPATCH(pResource, pCallContext, pParams, pCpuMapping)
#define gpumgmtapiAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) gpumgmtapiAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight)
NV_STATUS gpumgmtapiCtrlCmdSetShutdownState_IMPL(struct GpuManagementApi *pGpuMgmt, NV0020_CTRL_GPU_MGMT_SET_SHUTDOWN_STATE_PARAMS *pParams);

static inline NV_STATUS gpumgmtapiCtrlCmdSetShutdownState_DISPATCH(struct GpuManagementApi *pGpuMgmt, NV0020_CTRL_GPU_MGMT_SET_SHUTDOWN_STATE_PARAMS *pParams) {
    return pGpuMgmt->__gpumgmtapiCtrlCmdSetShutdownState__(pGpuMgmt, pParams);
}
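
/*
 * Illustrative sketch (reviewer addition, not part of the generated header):
 * a caller would go through the gpumgmtapiCtrlCmdSetShutdownState macro
 * above, which resolves to the function pointer installed by
 * __nvoc_init_funcTable_GpuManagementApi_1(). Both arguments here are
 * hypothetical.
 */
static inline NV_STATUS exampleSetShutdownState(struct GpuManagementApi *pGpuMgmt,
                                                NV0020_CTRL_GPU_MGMT_SET_SHUTDOWN_STATE_PARAMS *pParams)
{
    return gpumgmtapiCtrlCmdSetShutdownState(pGpuMgmt, pParams);
}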
static inline NvBool gpumgmtapiShareCallback_DISPATCH(struct GpuManagementApi *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
    return pResource->__gpumgmtapiShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy);
}

static inline NV_STATUS gpumgmtapiCheckMemInterUnmap_DISPATCH(struct GpuManagementApi *pRmResource, NvBool bSubdeviceHandleProvided) {
    return pRmResource->__gpumgmtapiCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided);
}

static inline NV_STATUS gpumgmtapiControl_DISPATCH(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return pResource->__gpumgmtapiControl__(pResource, pCallContext, pParams);
}

static inline NV_STATUS gpumgmtapiGetMemInterMapParams_DISPATCH(struct GpuManagementApi *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
    return pRmResource->__gpumgmtapiGetMemInterMapParams__(pRmResource, pParams);
}

static inline NV_STATUS gpumgmtapiGetMemoryMappingDescriptor_DISPATCH(struct GpuManagementApi *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
    return pRmResource->__gpumgmtapiGetMemoryMappingDescriptor__(pRmResource, ppMemDesc);
}

static inline NvU32 gpumgmtapiGetRefCount_DISPATCH(struct GpuManagementApi *pResource) {
    return pResource->__gpumgmtapiGetRefCount__(pResource);
}

static inline NV_STATUS gpumgmtapiControlFilter_DISPATCH(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return pResource->__gpumgmtapiControlFilter__(pResource, pCallContext, pParams);
}

static inline void gpumgmtapiAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct GpuManagementApi *pResource, RsResourceRef *pReference) {
    pResource->__gpumgmtapiAddAdditionalDependants__(pClient, pResource, pReference);
}

static inline NV_STATUS gpumgmtapiUnmap_DISPATCH(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) {
    return pResource->__gpumgmtapiUnmap__(pResource, pCallContext, pCpuMapping);
}

static inline NV_STATUS gpumgmtapiControl_Prologue_DISPATCH(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return pResource->__gpumgmtapiControl_Prologue__(pResource, pCallContext, pParams);
}

static inline NvBool gpumgmtapiCanCopy_DISPATCH(struct GpuManagementApi *pResource) {
    return pResource->__gpumgmtapiCanCopy__(pResource);
}

static inline NV_STATUS gpumgmtapiMapTo_DISPATCH(struct GpuManagementApi *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
    return pResource->__gpumgmtapiMapTo__(pResource, pParams);
}

static inline void gpumgmtapiPreDestruct_DISPATCH(struct GpuManagementApi *pResource) {
    pResource->__gpumgmtapiPreDestruct__(pResource);
}

static inline NV_STATUS gpumgmtapiUnmapFrom_DISPATCH(struct GpuManagementApi *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
    return pResource->__gpumgmtapiUnmapFrom__(pResource, pParams);
}

static inline void gpumgmtapiControl_Epilogue_DISPATCH(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    pResource->__gpumgmtapiControl_Epilogue__(pResource, pCallContext, pParams);
}

static inline NV_STATUS gpumgmtapiControlLookup_DISPATCH(struct GpuManagementApi *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) {
    return pResource->__gpumgmtapiControlLookup__(pResource, pParams, ppEntry);
}

static inline NV_STATUS gpumgmtapiMap_DISPATCH(struct GpuManagementApi *pResource, struct CALL_CONTEXT *pCallContext, RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) {
    return pResource->__gpumgmtapiMap__(pResource, pCallContext, pParams, pCpuMapping);
}

static inline NvBool gpumgmtapiAccessCallback_DISPATCH(struct GpuManagementApi *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
    return pResource->__gpumgmtapiAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight);
}

NV_STATUS gpumgmtapiConstruct_IMPL(struct GpuManagementApi *arg_pGpuMgmt, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams);
#define __nvoc_gpumgmtapiConstruct(arg_pGpuMgmt, arg_pCallContext, arg_pParams) gpumgmtapiConstruct_IMPL(arg_pGpuMgmt, arg_pCallContext, arg_pParams)
void gpumgmtapiDestruct_IMPL(struct GpuManagementApi *pGpuMgmt);
#define __nvoc_gpumgmtapiDestruct(pGpuMgmt) gpumgmtapiDestruct_IMPL(pGpuMgmt)
#undef PRIVATE_FIELD


#endif // GPU_MGMT_API_H

#ifdef __cplusplus
} // extern "C"
#endif
#endif // _G_GPU_MGMT_API_NVOC_H_
@@ -1,154 +0,0 @@
#define NVOC_GPU_MGR_H_PRIVATE_ACCESS_ALLOWED
#include "nvoc/runtime.h"
#include "nvoc/rtti.h"
#include "nvtypes.h"
#include "nvport/nvport.h"
#include "nvport/inline/util_valist.h"
#include "utils/nvassert.h"
#include "g_gpu_mgr_nvoc.h"

#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0xcf1b25 = 1;
#endif

extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJGPUMGR;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;

void __nvoc_init_OBJGPUMGR(OBJGPUMGR*);
void __nvoc_init_funcTable_OBJGPUMGR(OBJGPUMGR*);
NV_STATUS __nvoc_ctor_OBJGPUMGR(OBJGPUMGR*);
void __nvoc_init_dataField_OBJGPUMGR(OBJGPUMGR*);
void __nvoc_dtor_OBJGPUMGR(OBJGPUMGR*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJGPUMGR;

static const struct NVOC_RTTI __nvoc_rtti_OBJGPUMGR_OBJGPUMGR = {
    /*pClassDef=*/ &__nvoc_class_def_OBJGPUMGR,
    /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJGPUMGR,
    /*offset=*/ 0,
};

static const struct NVOC_RTTI __nvoc_rtti_OBJGPUMGR_Object = {
    /*pClassDef=*/ &__nvoc_class_def_Object,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(OBJGPUMGR, __nvoc_base_Object),
};

static const struct NVOC_CASTINFO __nvoc_castinfo_OBJGPUMGR = {
    /*numRelatives=*/ 2,
    /*relatives=*/ {
        &__nvoc_rtti_OBJGPUMGR_OBJGPUMGR,
        &__nvoc_rtti_OBJGPUMGR_Object,
    },
};

const struct NVOC_CLASS_DEF __nvoc_class_def_OBJGPUMGR =
{
    /*classInfo=*/ {
        /*size=*/ sizeof(OBJGPUMGR),
        /*classId=*/ classId(OBJGPUMGR),
        /*providerId=*/ &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
        /*name=*/ "OBJGPUMGR",
#endif
    },
    /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_OBJGPUMGR,
    /*pCastInfo=*/ &__nvoc_castinfo_OBJGPUMGR,
    /*pExportInfo=*/ &__nvoc_export_info_OBJGPUMGR
};

const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJGPUMGR =
{
    /*numEntries=*/ 0,
    /*pExportEntries=*/ 0
};

void __nvoc_dtor_Object(Object*);
void __nvoc_dtor_OBJGPUMGR(OBJGPUMGR *pThis) {
    __nvoc_gpumgrDestruct(pThis);
    __nvoc_dtor_Object(&pThis->__nvoc_base_Object);
    PORT_UNREFERENCED_VARIABLE(pThis);
}

void __nvoc_init_dataField_OBJGPUMGR(OBJGPUMGR *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);
}

NV_STATUS __nvoc_ctor_Object(Object* );
NV_STATUS __nvoc_ctor_OBJGPUMGR(OBJGPUMGR *pThis) {
    NV_STATUS status = NV_OK;
    status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object);
    if (status != NV_OK) goto __nvoc_ctor_OBJGPUMGR_fail_Object;
    __nvoc_init_dataField_OBJGPUMGR(pThis);

    status = __nvoc_gpumgrConstruct(pThis);
    if (status != NV_OK) goto __nvoc_ctor_OBJGPUMGR_fail__init;
    goto __nvoc_ctor_OBJGPUMGR_exit; // Success

__nvoc_ctor_OBJGPUMGR_fail__init:
    __nvoc_dtor_Object(&pThis->__nvoc_base_Object);
__nvoc_ctor_OBJGPUMGR_fail_Object:
__nvoc_ctor_OBJGPUMGR_exit:

    return status;
}

static void __nvoc_init_funcTable_OBJGPUMGR_1(OBJGPUMGR *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);
}

void __nvoc_init_funcTable_OBJGPUMGR(OBJGPUMGR *pThis) {
    __nvoc_init_funcTable_OBJGPUMGR_1(pThis);
}

void __nvoc_init_Object(Object*);
void __nvoc_init_OBJGPUMGR(OBJGPUMGR *pThis) {
    pThis->__nvoc_pbase_OBJGPUMGR = pThis;
    pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object;
    __nvoc_init_Object(&pThis->__nvoc_base_Object);
    __nvoc_init_funcTable_OBJGPUMGR(pThis);
}

NV_STATUS __nvoc_objCreate_OBJGPUMGR(OBJGPUMGR **ppThis, Dynamic *pParent, NvU32 createFlags) {
    NV_STATUS status;
    Object *pParentObj;
    OBJGPUMGR *pThis;

    pThis = portMemAllocNonPaged(sizeof(OBJGPUMGR));
    if (pThis == NULL) return NV_ERR_NO_MEMORY;

    portMemSet(pThis, 0, sizeof(OBJGPUMGR));

    __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_OBJGPUMGR);

    if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
    {
        pParentObj = dynamicCast(pParent, Object);
        objAddChild(pParentObj, &pThis->__nvoc_base_Object);
    }
    else
    {
        pThis->__nvoc_base_Object.pParent = NULL;
    }

    __nvoc_init_OBJGPUMGR(pThis);
    status = __nvoc_ctor_OBJGPUMGR(pThis);
    if (status != NV_OK) goto __nvoc_objCreate_OBJGPUMGR_cleanup;

    *ppThis = pThis;
    return NV_OK;

__nvoc_objCreate_OBJGPUMGR_cleanup:
    // do not call destructors here since the constructor already called them
    portMemFree(pThis);
    return status;
}

NV_STATUS __nvoc_objCreateDynamic_OBJGPUMGR(OBJGPUMGR **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
    NV_STATUS status;

    status = __nvoc_objCreate_OBJGPUMGR(ppThis, pParent, createFlags);

    return status;
}
@@ -1,444 +0,0 @@
#define NVOC_GPU_H_PRIVATE_ACCESS_ALLOWED
#include "nvoc/runtime.h"
#include "nvoc/rtti.h"
#include "nvtypes.h"
#include "nvport/nvport.h"
#include "nvport/inline/util_valist.h"
#include "utils/nvassert.h"
#include "g_gpu_nvoc.h"

#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0x7ef3cb = 1;
#endif

extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJGPU;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmHalspecOwner;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJTRACEABLE;

void __nvoc_init_OBJGPU(OBJGPU*,
        NvU32 ChipHal_arch, NvU32 ChipHal_impl, NvU32 ChipHal_hidrev,
        RM_RUNTIME_VARIANT RmVariantHal_rmVariant,
        NvU32 DispIpHal_ipver);
void __nvoc_init_funcTable_OBJGPU(OBJGPU*);
NV_STATUS __nvoc_ctor_OBJGPU(OBJGPU*, NvU32 arg_gpuInstance);
void __nvoc_init_dataField_OBJGPU(OBJGPU*);
void __nvoc_dtor_OBJGPU(OBJGPU*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJGPU;

static const struct NVOC_RTTI __nvoc_rtti_OBJGPU_OBJGPU = {
    /*pClassDef=*/ &__nvoc_class_def_OBJGPU,
    /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJGPU,
    /*offset=*/ 0,
};

static const struct NVOC_RTTI __nvoc_rtti_OBJGPU_Object = {
    /*pClassDef=*/ &__nvoc_class_def_Object,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(OBJGPU, __nvoc_base_Object),
};

static const struct NVOC_RTTI __nvoc_rtti_OBJGPU_RmHalspecOwner = {
    /*pClassDef=*/ &__nvoc_class_def_RmHalspecOwner,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(OBJGPU, __nvoc_base_RmHalspecOwner),
};

static const struct NVOC_RTTI __nvoc_rtti_OBJGPU_OBJTRACEABLE = {
    /*pClassDef=*/ &__nvoc_class_def_OBJTRACEABLE,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(OBJGPU, __nvoc_base_OBJTRACEABLE),
};

static const struct NVOC_CASTINFO __nvoc_castinfo_OBJGPU = {
    /*numRelatives=*/ 4,
    /*relatives=*/ {
        &__nvoc_rtti_OBJGPU_OBJGPU,
        &__nvoc_rtti_OBJGPU_OBJTRACEABLE,
        &__nvoc_rtti_OBJGPU_RmHalspecOwner,
        &__nvoc_rtti_OBJGPU_Object,
    },
};

const struct NVOC_CLASS_DEF __nvoc_class_def_OBJGPU =
{
    /*classInfo=*/ {
        /*size=*/ sizeof(OBJGPU),
        /*classId=*/ classId(OBJGPU),
        /*providerId=*/ &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
        /*name=*/ "OBJGPU",
#endif
    },
    /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_OBJGPU,
    /*pCastInfo=*/ &__nvoc_castinfo_OBJGPU,
    /*pExportInfo=*/ &__nvoc_export_info_OBJGPU
};

const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJGPU =
{
    /*numEntries=*/ 0,
    /*pExportEntries=*/ 0
};

void __nvoc_dtor_Object(Object*);
void __nvoc_dtor_RmHalspecOwner(RmHalspecOwner*);
void __nvoc_dtor_OBJTRACEABLE(OBJTRACEABLE*);
void __nvoc_dtor_OBJGPU(OBJGPU *pThis) {
    __nvoc_gpuDestruct(pThis);
    __nvoc_dtor_Object(&pThis->__nvoc_base_Object);
    __nvoc_dtor_RmHalspecOwner(&pThis->__nvoc_base_RmHalspecOwner);
    __nvoc_dtor_OBJTRACEABLE(&pThis->__nvoc_base_OBJTRACEABLE);
    PORT_UNREFERENCED_VARIABLE(pThis);
}

void __nvoc_init_dataField_OBJGPU(OBJGPU *pThis) {
    ChipHal *chipHal = &staticCast(pThis, RmHalspecOwner)->chipHal;
    const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx;
    RmVariantHal *rmVariantHal = &staticCast(pThis, RmHalspecOwner)->rmVariantHal;
    const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx;
    PORT_UNREFERENCED_VARIABLE(pThis);
    PORT_UNREFERENCED_VARIABLE(chipHal);
    PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx);
    PORT_UNREFERENCED_VARIABLE(rmVariantHal);
    PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx);
    pThis->setProperty(pThis, PDB_PROP_GPU_IS_CONNECTED, ((NvBool)(0 == 0)));

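    /*
     * Reviewer note (not generated output): each halspec check below is a
     * two-level bitmap test. The variant index splits into a 32-bit word
     * selector (HalVarIdx >> 5) and a bit position (HalVarIdx & 0x1f); the
     * literal mask encodes which variants take the non-default branch (here
     * word 2, bit 16 -- the T234D ChipHal variant, per the generated
     * comments).
     */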
// NVOC Property Hal field -- PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY
|
||||
if (( ((chipHal_HalVarIdx >> 5) == 2UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00010000UL) )) /* ChipHal: T234D */
|
||||
{
|
||||
pThis->setProperty(pThis, PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY, ((NvBool)(0 == 0)));
|
||||
}
|
||||
// default
|
||||
else
|
||||
{
|
||||
pThis->setProperty(pThis, PDB_PROP_GPU_TEGRA_SOC_NVDISPLAY, ((NvBool)(0 != 0)));
|
||||
}
|
||||
|
||||
// NVOC Property Hal field -- PDB_PROP_GPU_TEGRA_SOC_IGPU
|
||||
if (0)
|
||||
{
|
||||
}
|
||||
// default
|
||||
else
|
||||
{
|
||||
pThis->setProperty(pThis, PDB_PROP_GPU_TEGRA_SOC_IGPU, ((NvBool)(0 != 0)));
|
||||
}
|
||||
|
||||
// NVOC Property Hal field -- PDB_PROP_GPU_ATS_SUPPORTED
|
||||
if (0)
|
||||
{
|
||||
}
|
||||
// default
|
||||
else
|
||||
{
|
||||
pThis->setProperty(pThis, PDB_PROP_GPU_ATS_SUPPORTED, ((NvBool)(0 != 0)));
|
||||
}
|
||||
|
||||
// NVOC Property Hal field -- PDB_PROP_GPU_IS_UEFI
|
||||
if (( ((chipHal_HalVarIdx >> 5) == 2UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00010000UL) )) /* ChipHal: T234D */
|
||||
{
|
||||
pThis->setProperty(pThis, PDB_PROP_GPU_IS_UEFI, ((NvBool)(0 == 0)));
|
||||
}
|
||||
// default
|
||||
else
|
||||
{
|
||||
pThis->setProperty(pThis, PDB_PROP_GPU_IS_UEFI, ((NvBool)(0 != 0)));
|
||||
}
|
||||
|
||||
// NVOC Property Hal field -- PDB_PROP_GPU_ZERO_FB
|
||||
if (0)
|
||||
{
|
||||
}
|
||||
// default
|
||||
else
|
||||
{
|
||||
pThis->setProperty(pThis, PDB_PROP_GPU_ZERO_FB, ((NvBool)(0 != 0)));
|
||||
}
|
||||
|
||||
// NVOC Property Hal field -- PDB_PROP_GPU_CAN_OPTIMIZE_COMPUTE_USE_CASE
|
||||
if (0)
|
||||
{
|
||||
}
|
||||
// default
|
||||
else
|
||||
{
|
||||
pThis->setProperty(pThis, PDB_PROP_GPU_CAN_OPTIMIZE_COMPUTE_USE_CASE, ((NvBool)(0 != 0)));
|
||||
}
|
||||
|
||||
// NVOC Property Hal field -- PDB_PROP_GPU_MIG_SUPPORTED
|
||||
if (0)
|
||||
{
|
||||
}
|
||||
// default
|
||||
else
|
||||
{
|
||||
pThis->setProperty(pThis, PDB_PROP_GPU_MIG_SUPPORTED, ((NvBool)(0 != 0)));
|
||||
}
|
||||
|
||||
// NVOC Property Hal field -- PDB_PROP_GPU_VC_CAPABILITY_SUPPORTED
|
||||
if (0)
|
||||
{
|
||||
}
|
||||
// default
|
||||
else
|
||||
{
|
||||
pThis->setProperty(pThis, PDB_PROP_GPU_VC_CAPABILITY_SUPPORTED, ((NvBool)(0 != 0)));
|
||||
}
|
||||
|
||||
// NVOC Property Hal field -- PDB_PROP_GPU_RESETLESS_MIG_SUPPORTED
|
||||
if (0)
|
||||
{
|
||||
}
|
||||
// default
|
||||
else
|
||||
{
|
||||
pThis->setProperty(pThis, PDB_PROP_GPU_RESETLESS_MIG_SUPPORTED, ((NvBool)(0 != 0)));
|
||||
}
|
||||
|
||||
// NVOC Property Hal field -- PDB_PROP_GPU_IS_COT_ENABLED
|
||||
if (0)
|
||||
{
|
||||
}
|
||||
// default
|
||||
else
|
||||
{
|
||||
pThis->setProperty(pThis, PDB_PROP_GPU_IS_COT_ENABLED, ((NvBool)(0 != 0)));
|
||||
}
|
||||
|
||||
pThis->boardId = ~0;
|
||||
|
||||
pThis->deviceInstance = 32;
|
||||
|
||||
// Hal field -- isVirtual
|
||||
if (0)
|
||||
{
|
||||
}
|
||||
else if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */
|
||||
{
|
||||
pThis->isVirtual = ((NvBool)(0 != 0));
|
||||
}
|
||||
|
||||
// Hal field -- isGspClient
|
||||
if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */
|
||||
{
|
||||
pThis->isGspClient = ((NvBool)(0 == 0));
|
||||
}
|
||||
else if (0)
|
||||
{
|
||||
}
|
||||
|
||||
pThis->bIsDebugModeEnabled = ((NvBool)(0 != 0));
|
||||
|
||||
pThis->numOfMclkLockRequests = 0U;
|
||||
|
||||
pThis->bUseRegisterAccessMap = !(0);
|
||||
|
||||
pThis->boardInfo = ((void *)0);
|
||||
|
||||
// Hal field -- bUnifiedMemorySpaceEnabled
|
||||
if (( ((chipHal_HalVarIdx >> 5) == 2UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00010000UL) )) /* ChipHal: T234D */
|
||||
{
|
||||
pThis->bUnifiedMemorySpaceEnabled = ((NvBool)(0 == 0));
|
||||
}
|
||||
// default
|
||||
else
|
||||
{
|
||||
pThis->bUnifiedMemorySpaceEnabled = ((NvBool)(0 != 0));
|
||||
}
|
||||
|
||||
// Hal field -- bWarBug200577889SriovHeavyEnabled
|
||||
pThis->bWarBug200577889SriovHeavyEnabled = ((NvBool)(0 != 0));
|
||||
|
||||
// Hal field -- bNeed4kPageIsolation
|
||||
if (0)
|
||||
{
|
||||
}
|
||||
// default
|
||||
else
|
||||
{
|
||||
pThis->bNeed4kPageIsolation = ((NvBool)(0 != 0));
|
||||
}
|
||||
|
||||
// Hal field -- bInstLoc47bitPaWar
|
||||
if (0)
|
||||
{
|
||||
}
|
||||
// default
|
||||
else
|
||||
{
|
||||
pThis->bInstLoc47bitPaWar = ((NvBool)(0 != 0));
|
||||
}
|
||||
|
||||
// Hal field -- bIsBarPteInSysmemSupported
|
||||
if (( ((chipHal_HalVarIdx >> 5) == 2UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00010000UL) )) /* ChipHal: T234D */
|
||||
{
|
||||
pThis->bIsBarPteInSysmemSupported = ((NvBool)(0 == 0));
|
||||
}
|
||||
// default
|
||||
else
|
||||
{
|
||||
pThis->bIsBarPteInSysmemSupported = ((NvBool)(0 != 0));
|
||||
}
|
||||
|
||||
// Hal field -- bClientRmAllocatedCtxBuffer
|
||||
if (0)
|
||||
{
|
||||
}
|
||||
// default
|
||||
else
|
||||
{
|
||||
pThis->bClientRmAllocatedCtxBuffer = ((NvBool)(0 != 0));
|
||||
}
|
||||
|
||||
// Hal field -- bVidmemPreservationBrokenBug3172217
|
||||
if (0)
|
||||
{
|
||||
}
|
||||
// default
|
||||
else
|
||||
{
|
||||
pThis->bVidmemPreservationBrokenBug3172217 = ((NvBool)(0 != 0));
|
||||
}
|
||||
|
||||
// Hal field -- bInstanceMemoryAlwaysCached
|
||||
if (( ((chipHal_HalVarIdx >> 5) == 2UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00010000UL) )) /* ChipHal: T234D */
|
||||
{
|
||||
pThis->bInstanceMemoryAlwaysCached = ((NvBool)(0 == 0));
|
||||
}
|
||||
// default
|
||||
else
|
||||
{
|
||||
pThis->bInstanceMemoryAlwaysCached = ((NvBool)(0 != 0));
|
||||
}
|
||||
|
||||
pThis->bIsGeforce = ((NvBool)(0 == 0));
|
||||
|
||||
// Hal field -- bComputePolicyTimesliceSupported
|
||||
if (0)
|
||||
{
|
||||
}
|
||||
// default
|
||||
else
|
||||
{
|
||||
pThis->bComputePolicyTimesliceSupported = ((NvBool)(0 != 0));
|
||||
}
|
||||
}
|
||||
|
||||
NV_STATUS __nvoc_ctor_Object(Object* );
|
||||
NV_STATUS __nvoc_ctor_RmHalspecOwner(RmHalspecOwner* );
|
||||
NV_STATUS __nvoc_ctor_OBJTRACEABLE(OBJTRACEABLE* );
|
||||
NV_STATUS __nvoc_ctor_OBJGPU(OBJGPU *pThis, NvU32 arg_gpuInstance) {
|
||||
NV_STATUS status = NV_OK;
|
||||
status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object);
|
||||
if (status != NV_OK) goto __nvoc_ctor_OBJGPU_fail_Object;
|
||||
status = __nvoc_ctor_RmHalspecOwner(&pThis->__nvoc_base_RmHalspecOwner);
|
||||
if (status != NV_OK) goto __nvoc_ctor_OBJGPU_fail_RmHalspecOwner;
|
||||
status = __nvoc_ctor_OBJTRACEABLE(&pThis->__nvoc_base_OBJTRACEABLE);
|
||||
if (status != NV_OK) goto __nvoc_ctor_OBJGPU_fail_OBJTRACEABLE;
|
||||
__nvoc_init_dataField_OBJGPU(pThis);
|
||||
|
||||
status = __nvoc_gpuConstruct(pThis, arg_gpuInstance);
|
||||
if (status != NV_OK) goto __nvoc_ctor_OBJGPU_fail__init;
|
||||
goto __nvoc_ctor_OBJGPU_exit; // Success
|
||||
|
||||
__nvoc_ctor_OBJGPU_fail__init:
|
||||
__nvoc_dtor_OBJTRACEABLE(&pThis->__nvoc_base_OBJTRACEABLE);
|
||||
__nvoc_ctor_OBJGPU_fail_OBJTRACEABLE:
|
||||
__nvoc_dtor_RmHalspecOwner(&pThis->__nvoc_base_RmHalspecOwner);
|
||||
__nvoc_ctor_OBJGPU_fail_RmHalspecOwner:
|
||||
__nvoc_dtor_Object(&pThis->__nvoc_base_Object);
|
||||
__nvoc_ctor_OBJGPU_fail_Object:
|
||||
__nvoc_ctor_OBJGPU_exit:
|
||||
|
||||
return status;
|
||||
}

static void __nvoc_init_funcTable_OBJGPU_1(OBJGPU *pThis) {
    ChipHal *chipHal = &staticCast(pThis, RmHalspecOwner)->chipHal;
    const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx;
    RmVariantHal *rmVariantHal = &staticCast(pThis, RmHalspecOwner)->rmVariantHal;
    const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx;
    PORT_UNREFERENCED_VARIABLE(pThis);
    PORT_UNREFERENCED_VARIABLE(chipHal);
    PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx);
    PORT_UNREFERENCED_VARIABLE(rmVariantHal);
    PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx);
}

void __nvoc_init_funcTable_OBJGPU(OBJGPU *pThis) {
    __nvoc_init_funcTable_OBJGPU_1(pThis);
}

void __nvoc_init_Object(Object*);
void __nvoc_init_RmHalspecOwner(RmHalspecOwner*, NvU32, NvU32, NvU32, RM_RUNTIME_VARIANT, NvU32);
void __nvoc_init_OBJTRACEABLE(OBJTRACEABLE*);
void __nvoc_init_OBJGPU(OBJGPU *pThis,
        NvU32 ChipHal_arch, NvU32 ChipHal_impl, NvU32 ChipHal_hidrev,
        RM_RUNTIME_VARIANT RmVariantHal_rmVariant,
        NvU32 DispIpHal_ipver) {
    pThis->__nvoc_pbase_OBJGPU = pThis;
    pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object;
    pThis->__nvoc_pbase_RmHalspecOwner = &pThis->__nvoc_base_RmHalspecOwner;
    pThis->__nvoc_pbase_OBJTRACEABLE = &pThis->__nvoc_base_OBJTRACEABLE;
    __nvoc_init_Object(&pThis->__nvoc_base_Object);
    __nvoc_init_RmHalspecOwner(&pThis->__nvoc_base_RmHalspecOwner, ChipHal_arch, ChipHal_impl, ChipHal_hidrev, RmVariantHal_rmVariant, DispIpHal_ipver);
    __nvoc_init_OBJTRACEABLE(&pThis->__nvoc_base_OBJTRACEABLE);
    __nvoc_init_funcTable_OBJGPU(pThis);
}

NV_STATUS __nvoc_objCreate_OBJGPU(OBJGPU **ppThis, Dynamic *pParent, NvU32 createFlags,
        NvU32 ChipHal_arch, NvU32 ChipHal_impl, NvU32 ChipHal_hidrev,
        RM_RUNTIME_VARIANT RmVariantHal_rmVariant,
        NvU32 DispIpHal_ipver, NvU32 arg_gpuInstance) {
    NV_STATUS status;
    Object *pParentObj;
    OBJGPU *pThis;

    pThis = portMemAllocNonPaged(sizeof(OBJGPU));
    if (pThis == NULL) return NV_ERR_NO_MEMORY;

    portMemSet(pThis, 0, sizeof(OBJGPU));

    __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_OBJGPU);

    if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
    {
        pParentObj = dynamicCast(pParent, Object);
        objAddChild(pParentObj, &pThis->__nvoc_base_Object);
    }
    else
    {
        pThis->__nvoc_base_Object.pParent = NULL;
    }

    __nvoc_init_OBJGPU(pThis, ChipHal_arch, ChipHal_impl, ChipHal_hidrev, RmVariantHal_rmVariant, DispIpHal_ipver);
    status = __nvoc_ctor_OBJGPU(pThis, arg_gpuInstance);
    if (status != NV_OK) goto __nvoc_objCreate_OBJGPU_cleanup;

    *ppThis = pThis;
    return NV_OK;

__nvoc_objCreate_OBJGPU_cleanup:
    // do not call destructors here since the constructor already called them
    portMemFree(pThis);
    return status;
}
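
/*
 * Editor's note: a hedged usage sketch of the creation entry point above;
 * the argument values below are hypothetical placeholders, not taken from
 * this driver:
 *
 *     OBJGPU *pGpu = NULL;
 *     NV_STATUS status = __nvoc_objCreate_OBJGPU(&pGpu, pParent, 0,
 *         chipArch, chipImpl, chipHidrev, rmVariant, dispIpVer, gpuInstance);
 *     if (status != NV_OK)
 *         return status;
 */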

NV_STATUS __nvoc_objCreateDynamic_OBJGPU(OBJGPU **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
    NV_STATUS status;
    NvU32 ChipHal_arch = va_arg(args, NvU32);
    NvU32 ChipHal_impl = va_arg(args, NvU32);
    NvU32 ChipHal_hidrev = va_arg(args, NvU32);
    RM_RUNTIME_VARIANT RmVariantHal_rmVariant = va_arg(args, RM_RUNTIME_VARIANT);
    NvU32 DispIpHal_ipver = va_arg(args, NvU32);
    NvU32 arg_gpuInstance = va_arg(args, NvU32);

    status = __nvoc_objCreate_OBJGPU(ppThis, pParent, createFlags, ChipHal_arch, ChipHal_impl, ChipHal_hidrev, RmVariantHal_rmVariant, DispIpHal_ipver, arg_gpuInstance);

    return status;
}

File diff suppressed because it is too large
@@ -1,309 +0,0 @@
#define NVOC_GPU_RESOURCE_H_PRIVATE_ACCESS_ALLOWED
#include "nvoc/runtime.h"
#include "nvoc/rtti.h"
#include "nvtypes.h"
#include "nvport/nvport.h"
#include "nvport/inline/util_valist.h"
#include "utils/nvassert.h"
#include "g_gpu_resource_nvoc.h"

#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0x5d5d9f = 1;
#endif

extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource;

void __nvoc_init_GpuResource(GpuResource*);
void __nvoc_init_funcTable_GpuResource(GpuResource*);
NV_STATUS __nvoc_ctor_GpuResource(GpuResource*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams);
void __nvoc_init_dataField_GpuResource(GpuResource*);
void __nvoc_dtor_GpuResource(GpuResource*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_GpuResource;

static const struct NVOC_RTTI __nvoc_rtti_GpuResource_GpuResource = {
    /*pClassDef=*/ &__nvoc_class_def_GpuResource,
    /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_GpuResource,
    /*offset=*/ 0,
};

static const struct NVOC_RTTI __nvoc_rtti_GpuResource_Object = {
    /*pClassDef=*/ &__nvoc_class_def_Object,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(GpuResource, __nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object),
};

static const struct NVOC_RTTI __nvoc_rtti_GpuResource_RsResource = {
    /*pClassDef=*/ &__nvoc_class_def_RsResource,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(GpuResource, __nvoc_base_RmResource.__nvoc_base_RsResource),
};

static const struct NVOC_RTTI __nvoc_rtti_GpuResource_RmResourceCommon = {
    /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(GpuResource, __nvoc_base_RmResource.__nvoc_base_RmResourceCommon),
};

static const struct NVOC_RTTI __nvoc_rtti_GpuResource_RmResource = {
    /*pClassDef=*/ &__nvoc_class_def_RmResource,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(GpuResource, __nvoc_base_RmResource),
};

static const struct NVOC_CASTINFO __nvoc_castinfo_GpuResource = {
    /*numRelatives=*/ 5,
    /*relatives=*/ {
        &__nvoc_rtti_GpuResource_GpuResource,
        &__nvoc_rtti_GpuResource_RmResource,
        &__nvoc_rtti_GpuResource_RmResourceCommon,
        &__nvoc_rtti_GpuResource_RsResource,
        &__nvoc_rtti_GpuResource_Object,
    },
};

const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource =
{
    /*classInfo=*/ {
        /*size=*/ sizeof(GpuResource),
        /*classId=*/ classId(GpuResource),
        /*providerId=*/ &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
        /*name=*/ "GpuResource",
#endif
    },
    /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_GpuResource,
    /*pCastInfo=*/ &__nvoc_castinfo_GpuResource,
    /*pExportInfo=*/ &__nvoc_export_info_GpuResource
};

static NV_STATUS __nvoc_thunk_GpuResource_resControl(struct RsResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return gpuresControl((struct GpuResource *)(((unsigned char *)pGpuResource) - __nvoc_rtti_GpuResource_RsResource.offset), pCallContext, pParams);
}
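
/*
 * Editor's note: each thunk converts between base and derived pointers by
 * adding or subtracting the compile-time offset recorded in the RTTI entry:
 * "- offset" moves from an embedded base (here RsResource) back to the
 * enclosing GpuResource, and "+ offset" goes the other way. A minimal sketch
 * of the downcast (variable names hypothetical):
 *
 *     struct GpuResource *pGpuRes = (struct GpuResource *)
 *         ((unsigned char *)pRsResource - __nvoc_rtti_GpuResource_RsResource.offset);
 */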

static NV_STATUS __nvoc_thunk_GpuResource_resMap(struct RsResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) {
    return gpuresMap((struct GpuResource *)(((unsigned char *)pGpuResource) - __nvoc_rtti_GpuResource_RsResource.offset), pCallContext, pParams, pCpuMapping);
}

static NV_STATUS __nvoc_thunk_GpuResource_resUnmap(struct RsResource *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) {
    return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) - __nvoc_rtti_GpuResource_RsResource.offset), pCallContext, pCpuMapping);
}

static NvBool __nvoc_thunk_GpuResource_rmresShareCallback(struct RmResource *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
    return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) - __nvoc_rtti_GpuResource_RmResource.offset), pInvokingClient, pParentRef, pSharePolicy);
}

static NV_STATUS __nvoc_thunk_RmResource_gpuresCheckMemInterUnmap(struct GpuResource *pRmResource, NvBool bSubdeviceHandleProvided) {
    return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_GpuResource_RmResource.offset), bSubdeviceHandleProvided);
}

static NV_STATUS __nvoc_thunk_RmResource_gpuresGetMemInterMapParams(struct GpuResource *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
    return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_GpuResource_RmResource.offset), pParams);
}

static NV_STATUS __nvoc_thunk_RmResource_gpuresGetMemoryMappingDescriptor(struct GpuResource *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
    return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_GpuResource_RmResource.offset), ppMemDesc);
}

static NvU32 __nvoc_thunk_RsResource_gpuresGetRefCount(struct GpuResource *pResource) {
    return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuResource_RsResource.offset));
}

static NV_STATUS __nvoc_thunk_RsResource_gpuresControlFilter(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuResource_RsResource.offset), pCallContext, pParams);
}

static void __nvoc_thunk_RsResource_gpuresAddAdditionalDependants(struct RsClient *pClient, struct GpuResource *pResource, RsResourceRef *pReference) {
    resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuResource_RsResource.offset), pReference);
}

static NV_STATUS __nvoc_thunk_RmResource_gpuresControl_Prologue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuResource_RmResource.offset), pCallContext, pParams);
}

static NvBool __nvoc_thunk_RsResource_gpuresCanCopy(struct GpuResource *pResource) {
    return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuResource_RsResource.offset));
}

static NV_STATUS __nvoc_thunk_RsResource_gpuresMapTo(struct GpuResource *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
    return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuResource_RsResource.offset), pParams);
}

static void __nvoc_thunk_RsResource_gpuresPreDestruct(struct GpuResource *pResource) {
    resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuResource_RsResource.offset));
}

static NV_STATUS __nvoc_thunk_RsResource_gpuresUnmapFrom(struct GpuResource *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
    return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuResource_RsResource.offset), pParams);
}

static void __nvoc_thunk_RmResource_gpuresControl_Epilogue(struct GpuResource *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuResource_RmResource.offset), pCallContext, pParams);
}

static NV_STATUS __nvoc_thunk_RsResource_gpuresControlLookup(struct GpuResource *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) {
    return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuResource_RsResource.offset), pParams, ppEntry);
}

static NvBool __nvoc_thunk_RmResource_gpuresAccessCallback(struct GpuResource *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
    return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_GpuResource_RmResource.offset), pInvokingClient, pAllocParams, accessRight);
}

const struct NVOC_EXPORT_INFO __nvoc_export_info_GpuResource =
{
    /*numEntries=*/ 0,
    /*pExportEntries=*/ 0
};

void __nvoc_dtor_RmResource(RmResource*);
void __nvoc_dtor_GpuResource(GpuResource *pThis) {
    __nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource);
    PORT_UNREFERENCED_VARIABLE(pThis);
}

void __nvoc_init_dataField_GpuResource(GpuResource *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);
}

NV_STATUS __nvoc_ctor_RmResource(RmResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
NV_STATUS __nvoc_ctor_GpuResource(GpuResource *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
    NV_STATUS status = NV_OK;
    status = __nvoc_ctor_RmResource(&pThis->__nvoc_base_RmResource, arg_pCallContext, arg_pParams);
    if (status != NV_OK) goto __nvoc_ctor_GpuResource_fail_RmResource;
    __nvoc_init_dataField_GpuResource(pThis);

    status = __nvoc_gpuresConstruct(pThis, arg_pCallContext, arg_pParams);
    if (status != NV_OK) goto __nvoc_ctor_GpuResource_fail__init;
    goto __nvoc_ctor_GpuResource_exit; // Success

__nvoc_ctor_GpuResource_fail__init:
    __nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource);
__nvoc_ctor_GpuResource_fail_RmResource:
__nvoc_ctor_GpuResource_exit:

    return status;
}

static void __nvoc_init_funcTable_GpuResource_1(GpuResource *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);

    pThis->__gpuresControl__ = &gpuresControl_IMPL;
    pThis->__gpuresMap__ = &gpuresMap_IMPL;
    pThis->__gpuresUnmap__ = &gpuresUnmap_IMPL;
    pThis->__gpuresShareCallback__ = &gpuresShareCallback_IMPL;
    pThis->__gpuresGetRegBaseOffsetAndSize__ = &gpuresGetRegBaseOffsetAndSize_IMPL;
    pThis->__gpuresGetMapAddrSpace__ = &gpuresGetMapAddrSpace_IMPL;
    pThis->__gpuresInternalControlForward__ = &gpuresInternalControlForward_IMPL;
    pThis->__gpuresGetInternalObjectHandle__ = &gpuresGetInternalObjectHandle_IMPL;
    pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__resControl__ = &__nvoc_thunk_GpuResource_resControl;
    pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__resMap__ = &__nvoc_thunk_GpuResource_resMap;
    pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__resUnmap__ = &__nvoc_thunk_GpuResource_resUnmap;
    pThis->__nvoc_base_RmResource.__rmresShareCallback__ = &__nvoc_thunk_GpuResource_rmresShareCallback;
    pThis->__gpuresCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_gpuresCheckMemInterUnmap;
    pThis->__gpuresGetMemInterMapParams__ = &__nvoc_thunk_RmResource_gpuresGetMemInterMapParams;
    pThis->__gpuresGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_gpuresGetMemoryMappingDescriptor;
    pThis->__gpuresGetRefCount__ = &__nvoc_thunk_RsResource_gpuresGetRefCount;
    pThis->__gpuresControlFilter__ = &__nvoc_thunk_RsResource_gpuresControlFilter;
    pThis->__gpuresAddAdditionalDependants__ = &__nvoc_thunk_RsResource_gpuresAddAdditionalDependants;
    pThis->__gpuresControl_Prologue__ = &__nvoc_thunk_RmResource_gpuresControl_Prologue;
    pThis->__gpuresCanCopy__ = &__nvoc_thunk_RsResource_gpuresCanCopy;
    pThis->__gpuresMapTo__ = &__nvoc_thunk_RsResource_gpuresMapTo;
    pThis->__gpuresPreDestruct__ = &__nvoc_thunk_RsResource_gpuresPreDestruct;
    pThis->__gpuresUnmapFrom__ = &__nvoc_thunk_RsResource_gpuresUnmapFrom;
    pThis->__gpuresControl_Epilogue__ = &__nvoc_thunk_RmResource_gpuresControl_Epilogue;
    pThis->__gpuresControlLookup__ = &__nvoc_thunk_RsResource_gpuresControlLookup;
    pThis->__gpuresAccessCallback__ = &__nvoc_thunk_RmResource_gpuresAccessCallback;
}

void __nvoc_init_funcTable_GpuResource(GpuResource *pThis) {
    __nvoc_init_funcTable_GpuResource_1(pThis);
}

void __nvoc_init_RmResource(RmResource*);
void __nvoc_init_GpuResource(GpuResource *pThis) {
    pThis->__nvoc_pbase_GpuResource = pThis;
    pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object;
    pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource;
    pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_RmResource.__nvoc_base_RmResourceCommon;
    pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_RmResource;
    __nvoc_init_RmResource(&pThis->__nvoc_base_RmResource);
    __nvoc_init_funcTable_GpuResource(pThis);
}
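
/*
 * Editor's note: __nvoc_init_* caches one "pbase" pointer per ancestor class
 * before wiring the function table; judging from the __staticCast_* macros in
 * the corresponding generated headers, this appears to be what lets
 * staticCast() resolve without a runtime search of the RTTI chain.
 */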

NV_STATUS __nvoc_objCreate_GpuResource(GpuResource **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
    NV_STATUS status;
    Object *pParentObj;
    GpuResource *pThis;

    pThis = portMemAllocNonPaged(sizeof(GpuResource));
    if (pThis == NULL) return NV_ERR_NO_MEMORY;

    portMemSet(pThis, 0, sizeof(GpuResource));

    __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_GpuResource);

    if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
    {
        pParentObj = dynamicCast(pParent, Object);
        objAddChild(pParentObj, &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object);
    }
    else
    {
        pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL;
    }

    __nvoc_init_GpuResource(pThis);
    status = __nvoc_ctor_GpuResource(pThis, arg_pCallContext, arg_pParams);
    if (status != NV_OK) goto __nvoc_objCreate_GpuResource_cleanup;

    *ppThis = pThis;
    return NV_OK;

__nvoc_objCreate_GpuResource_cleanup:
    // do not call destructors here since the constructor already called them
    portMemFree(pThis);
    return status;
}

NV_STATUS __nvoc_objCreateDynamic_GpuResource(GpuResource **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
    NV_STATUS status;
    struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *);
    struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *);

    status = __nvoc_objCreate_GpuResource(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams);

    return status;
}
@@ -1,154 +0,0 @@
#define NVOC_HAL_MGR_H_PRIVATE_ACCESS_ALLOWED
#include "nvoc/runtime.h"
#include "nvoc/rtti.h"
#include "nvtypes.h"
#include "nvport/nvport.h"
#include "nvport/inline/util_valist.h"
#include "utils/nvassert.h"
#include "g_hal_mgr_nvoc.h"

#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0xbf26de = 1;
#endif

extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJHALMGR;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;

void __nvoc_init_OBJHALMGR(OBJHALMGR*);
void __nvoc_init_funcTable_OBJHALMGR(OBJHALMGR*);
NV_STATUS __nvoc_ctor_OBJHALMGR(OBJHALMGR*);
void __nvoc_init_dataField_OBJHALMGR(OBJHALMGR*);
void __nvoc_dtor_OBJHALMGR(OBJHALMGR*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJHALMGR;

static const struct NVOC_RTTI __nvoc_rtti_OBJHALMGR_OBJHALMGR = {
    /*pClassDef=*/ &__nvoc_class_def_OBJHALMGR,
    /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJHALMGR,
    /*offset=*/ 0,
};

static const struct NVOC_RTTI __nvoc_rtti_OBJHALMGR_Object = {
    /*pClassDef=*/ &__nvoc_class_def_Object,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(OBJHALMGR, __nvoc_base_Object),
};

static const struct NVOC_CASTINFO __nvoc_castinfo_OBJHALMGR = {
    /*numRelatives=*/ 2,
    /*relatives=*/ {
        &__nvoc_rtti_OBJHALMGR_OBJHALMGR,
        &__nvoc_rtti_OBJHALMGR_Object,
    },
};

const struct NVOC_CLASS_DEF __nvoc_class_def_OBJHALMGR =
{
    /*classInfo=*/ {
        /*size=*/ sizeof(OBJHALMGR),
        /*classId=*/ classId(OBJHALMGR),
        /*providerId=*/ &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
        /*name=*/ "OBJHALMGR",
#endif
    },
    /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_OBJHALMGR,
    /*pCastInfo=*/ &__nvoc_castinfo_OBJHALMGR,
    /*pExportInfo=*/ &__nvoc_export_info_OBJHALMGR
};

const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJHALMGR =
{
    /*numEntries=*/ 0,
    /*pExportEntries=*/ 0
};

void __nvoc_dtor_Object(Object*);
void __nvoc_dtor_OBJHALMGR(OBJHALMGR *pThis) {
    __nvoc_halmgrDestruct(pThis);
    __nvoc_dtor_Object(&pThis->__nvoc_base_Object);
    PORT_UNREFERENCED_VARIABLE(pThis);
}

void __nvoc_init_dataField_OBJHALMGR(OBJHALMGR *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);
}

NV_STATUS __nvoc_ctor_Object(Object* );
NV_STATUS __nvoc_ctor_OBJHALMGR(OBJHALMGR *pThis) {
    NV_STATUS status = NV_OK;
    status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object);
    if (status != NV_OK) goto __nvoc_ctor_OBJHALMGR_fail_Object;
    __nvoc_init_dataField_OBJHALMGR(pThis);

    status = __nvoc_halmgrConstruct(pThis);
    if (status != NV_OK) goto __nvoc_ctor_OBJHALMGR_fail__init;
    goto __nvoc_ctor_OBJHALMGR_exit; // Success

__nvoc_ctor_OBJHALMGR_fail__init:
    __nvoc_dtor_Object(&pThis->__nvoc_base_Object);
__nvoc_ctor_OBJHALMGR_fail_Object:
__nvoc_ctor_OBJHALMGR_exit:

    return status;
}

static void __nvoc_init_funcTable_OBJHALMGR_1(OBJHALMGR *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);
}

void __nvoc_init_funcTable_OBJHALMGR(OBJHALMGR *pThis) {
    __nvoc_init_funcTable_OBJHALMGR_1(pThis);
}

void __nvoc_init_Object(Object*);
void __nvoc_init_OBJHALMGR(OBJHALMGR *pThis) {
    pThis->__nvoc_pbase_OBJHALMGR = pThis;
    pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object;
    __nvoc_init_Object(&pThis->__nvoc_base_Object);
    __nvoc_init_funcTable_OBJHALMGR(pThis);
}

NV_STATUS __nvoc_objCreate_OBJHALMGR(OBJHALMGR **ppThis, Dynamic *pParent, NvU32 createFlags) {
    NV_STATUS status;
    Object *pParentObj;
    OBJHALMGR *pThis;

    pThis = portMemAllocNonPaged(sizeof(OBJHALMGR));
    if (pThis == NULL) return NV_ERR_NO_MEMORY;

    portMemSet(pThis, 0, sizeof(OBJHALMGR));

    __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_OBJHALMGR);

    if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
    {
        pParentObj = dynamicCast(pParent, Object);
        objAddChild(pParentObj, &pThis->__nvoc_base_Object);
    }
    else
    {
        pThis->__nvoc_base_Object.pParent = NULL;
    }

    __nvoc_init_OBJHALMGR(pThis);
    status = __nvoc_ctor_OBJHALMGR(pThis);
    if (status != NV_OK) goto __nvoc_objCreate_OBJHALMGR_cleanup;

    *ppThis = pThis;
    return NV_OK;

__nvoc_objCreate_OBJHALMGR_cleanup:
    // do not call destructors here since the constructor already called them
    portMemFree(pThis);
    return status;
}

NV_STATUS __nvoc_objCreateDynamic_OBJHALMGR(OBJHALMGR **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
    NV_STATUS status;

    status = __nvoc_objCreate_OBJHALMGR(ppThis, pParent, createFlags);

    return status;
}
@@ -1,148 +0,0 @@
#define NVOC_HAL_H_PRIVATE_ACCESS_ALLOWED
#include "nvoc/runtime.h"
#include "nvoc/rtti.h"
#include "nvtypes.h"
#include "nvport/nvport.h"
#include "nvport/inline/util_valist.h"
#include "utils/nvassert.h"
#include "g_hal_nvoc.h"

#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0xe803b6 = 1;
#endif

extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJHAL;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;

void __nvoc_init_OBJHAL(OBJHAL*);
void __nvoc_init_funcTable_OBJHAL(OBJHAL*);
NV_STATUS __nvoc_ctor_OBJHAL(OBJHAL*);
void __nvoc_init_dataField_OBJHAL(OBJHAL*);
void __nvoc_dtor_OBJHAL(OBJHAL*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJHAL;

static const struct NVOC_RTTI __nvoc_rtti_OBJHAL_OBJHAL = {
    /*pClassDef=*/ &__nvoc_class_def_OBJHAL,
    /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJHAL,
    /*offset=*/ 0,
};

static const struct NVOC_RTTI __nvoc_rtti_OBJHAL_Object = {
    /*pClassDef=*/ &__nvoc_class_def_Object,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(OBJHAL, __nvoc_base_Object),
};

static const struct NVOC_CASTINFO __nvoc_castinfo_OBJHAL = {
    /*numRelatives=*/ 2,
    /*relatives=*/ {
        &__nvoc_rtti_OBJHAL_OBJHAL,
        &__nvoc_rtti_OBJHAL_Object,
    },
};

const struct NVOC_CLASS_DEF __nvoc_class_def_OBJHAL =
{
    /*classInfo=*/ {
        /*size=*/ sizeof(OBJHAL),
        /*classId=*/ classId(OBJHAL),
        /*providerId=*/ &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
        /*name=*/ "OBJHAL",
#endif
    },
    /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_OBJHAL,
    /*pCastInfo=*/ &__nvoc_castinfo_OBJHAL,
    /*pExportInfo=*/ &__nvoc_export_info_OBJHAL
};

const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJHAL =
{
    /*numEntries=*/ 0,
    /*pExportEntries=*/ 0
};

void __nvoc_dtor_Object(Object*);
void __nvoc_dtor_OBJHAL(OBJHAL *pThis) {
    __nvoc_dtor_Object(&pThis->__nvoc_base_Object);
    PORT_UNREFERENCED_VARIABLE(pThis);
}

void __nvoc_init_dataField_OBJHAL(OBJHAL *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);
}

NV_STATUS __nvoc_ctor_Object(Object* );
NV_STATUS __nvoc_ctor_OBJHAL(OBJHAL *pThis) {
    NV_STATUS status = NV_OK;
    status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object);
    if (status != NV_OK) goto __nvoc_ctor_OBJHAL_fail_Object;
    __nvoc_init_dataField_OBJHAL(pThis);
    goto __nvoc_ctor_OBJHAL_exit; // Success

__nvoc_ctor_OBJHAL_fail_Object:
__nvoc_ctor_OBJHAL_exit:

    return status;
}

static void __nvoc_init_funcTable_OBJHAL_1(OBJHAL *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);
}

void __nvoc_init_funcTable_OBJHAL(OBJHAL *pThis) {
    __nvoc_init_funcTable_OBJHAL_1(pThis);
}

void __nvoc_init_Object(Object*);
void __nvoc_init_OBJHAL(OBJHAL *pThis) {
    pThis->__nvoc_pbase_OBJHAL = pThis;
    pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object;
    __nvoc_init_Object(&pThis->__nvoc_base_Object);
    __nvoc_init_funcTable_OBJHAL(pThis);
}

NV_STATUS __nvoc_objCreate_OBJHAL(OBJHAL **ppThis, Dynamic *pParent, NvU32 createFlags) {
    NV_STATUS status;
    Object *pParentObj;
    OBJHAL *pThis;

    pThis = portMemAllocNonPaged(sizeof(OBJHAL));
    if (pThis == NULL) return NV_ERR_NO_MEMORY;

    portMemSet(pThis, 0, sizeof(OBJHAL));

    __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_OBJHAL);

    if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
    {
        pParentObj = dynamicCast(pParent, Object);
        objAddChild(pParentObj, &pThis->__nvoc_base_Object);
    }
    else
    {
        pThis->__nvoc_base_Object.pParent = NULL;
    }

    __nvoc_init_OBJHAL(pThis);
    status = __nvoc_ctor_OBJHAL(pThis);
    if (status != NV_OK) goto __nvoc_objCreate_OBJHAL_cleanup;

    *ppThis = pThis;
    return NV_OK;

__nvoc_objCreate_OBJHAL_cleanup:
    // do not call destructors here since the constructor already called them
    portMemFree(pThis);
    return status;
}

NV_STATUS __nvoc_objCreateDynamic_OBJHAL(OBJHAL **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
    NV_STATUS status;

    status = __nvoc_objCreate_OBJHAL(ppThis, pParent, createFlags);

    return status;
}
@@ -1,327 +0,0 @@
#define NVOC_HDA_CODEC_API_H_PRIVATE_ACCESS_ALLOWED
#include "nvoc/runtime.h"
#include "nvoc/rtti.h"
#include "nvtypes.h"
#include "nvport/nvport.h"
#include "nvport/inline/util_valist.h"
#include "utils/nvassert.h"
#include "g_hda_codec_api_nvoc.h"

#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0xf59a20 = 1;
#endif

extern const struct NVOC_CLASS_DEF __nvoc_class_def_Hdacodec;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_GpuResource;

void __nvoc_init_Hdacodec(Hdacodec*);
void __nvoc_init_funcTable_Hdacodec(Hdacodec*);
NV_STATUS __nvoc_ctor_Hdacodec(Hdacodec*, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams);
void __nvoc_init_dataField_Hdacodec(Hdacodec*);
void __nvoc_dtor_Hdacodec(Hdacodec*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_Hdacodec;

static const struct NVOC_RTTI __nvoc_rtti_Hdacodec_Hdacodec = {
    /*pClassDef=*/ &__nvoc_class_def_Hdacodec,
    /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_Hdacodec,
    /*offset=*/ 0,
};

static const struct NVOC_RTTI __nvoc_rtti_Hdacodec_Object = {
    /*pClassDef=*/ &__nvoc_class_def_Object,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(Hdacodec, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object),
};

static const struct NVOC_RTTI __nvoc_rtti_Hdacodec_RsResource = {
    /*pClassDef=*/ &__nvoc_class_def_RsResource,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(Hdacodec, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource),
};

static const struct NVOC_RTTI __nvoc_rtti_Hdacodec_RmResourceCommon = {
    /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(Hdacodec, __nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon),
};

static const struct NVOC_RTTI __nvoc_rtti_Hdacodec_RmResource = {
    /*pClassDef=*/ &__nvoc_class_def_RmResource,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(Hdacodec, __nvoc_base_GpuResource.__nvoc_base_RmResource),
};

static const struct NVOC_RTTI __nvoc_rtti_Hdacodec_GpuResource = {
    /*pClassDef=*/ &__nvoc_class_def_GpuResource,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(Hdacodec, __nvoc_base_GpuResource),
};

static const struct NVOC_CASTINFO __nvoc_castinfo_Hdacodec = {
    /*numRelatives=*/ 6,
    /*relatives=*/ {
        &__nvoc_rtti_Hdacodec_Hdacodec,
        &__nvoc_rtti_Hdacodec_GpuResource,
        &__nvoc_rtti_Hdacodec_RmResource,
        &__nvoc_rtti_Hdacodec_RmResourceCommon,
        &__nvoc_rtti_Hdacodec_RsResource,
        &__nvoc_rtti_Hdacodec_Object,
    },
};

const struct NVOC_CLASS_DEF __nvoc_class_def_Hdacodec =
{
    /*classInfo=*/ {
        /*size=*/ sizeof(Hdacodec),
        /*classId=*/ classId(Hdacodec),
        /*providerId=*/ &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
        /*name=*/ "Hdacodec",
#endif
    },
    /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_Hdacodec,
    /*pCastInfo=*/ &__nvoc_castinfo_Hdacodec,
    /*pExportInfo=*/ &__nvoc_export_info_Hdacodec
};

static NvBool __nvoc_thunk_GpuResource_hdacodecShareCallback(struct Hdacodec *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
    return gpuresShareCallback((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Hdacodec_GpuResource.offset), pInvokingClient, pParentRef, pSharePolicy);
}

static NV_STATUS __nvoc_thunk_GpuResource_hdacodecControl(struct Hdacodec *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return gpuresControl((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Hdacodec_GpuResource.offset), pCallContext, pParams);
}

static NV_STATUS __nvoc_thunk_GpuResource_hdacodecUnmap(struct Hdacodec *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) {
    return gpuresUnmap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Hdacodec_GpuResource.offset), pCallContext, pCpuMapping);
}

static NV_STATUS __nvoc_thunk_RmResource_hdacodecGetMemInterMapParams(struct Hdacodec *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
    return rmresGetMemInterMapParams((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_Hdacodec_RmResource.offset), pParams);
}

static NV_STATUS __nvoc_thunk_RmResource_hdacodecGetMemoryMappingDescriptor(struct Hdacodec *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
    return rmresGetMemoryMappingDescriptor((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_Hdacodec_RmResource.offset), ppMemDesc);
}

static NV_STATUS __nvoc_thunk_GpuResource_hdacodecGetMapAddrSpace(struct Hdacodec *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) {
    return gpuresGetMapAddrSpace((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Hdacodec_GpuResource.offset), pCallContext, mapFlags, pAddrSpace);
}

static NvHandle __nvoc_thunk_GpuResource_hdacodecGetInternalObjectHandle(struct Hdacodec *pGpuResource) {
    return gpuresGetInternalObjectHandle((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Hdacodec_GpuResource.offset));
}

static NV_STATUS __nvoc_thunk_RsResource_hdacodecControlFilter(struct Hdacodec *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Hdacodec_RsResource.offset), pCallContext, pParams);
}

static void __nvoc_thunk_RsResource_hdacodecAddAdditionalDependants(struct RsClient *pClient, struct Hdacodec *pResource, RsResourceRef *pReference) {
    resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Hdacodec_RsResource.offset), pReference);
}

static NvU32 __nvoc_thunk_RsResource_hdacodecGetRefCount(struct Hdacodec *pResource) {
    return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Hdacodec_RsResource.offset));
}

static NV_STATUS __nvoc_thunk_RmResource_hdacodecCheckMemInterUnmap(struct Hdacodec *pRmResource, NvBool bSubdeviceHandleProvided) {
    return rmresCheckMemInterUnmap((struct RmResource *)(((unsigned char *)pRmResource) + __nvoc_rtti_Hdacodec_RmResource.offset), bSubdeviceHandleProvided);
}

static NV_STATUS __nvoc_thunk_RsResource_hdacodecMapTo(struct Hdacodec *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
    return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Hdacodec_RsResource.offset), pParams);
}

static NV_STATUS __nvoc_thunk_RmResource_hdacodecControl_Prologue(struct Hdacodec *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_Hdacodec_RmResource.offset), pCallContext, pParams);
}

static NV_STATUS __nvoc_thunk_GpuResource_hdacodecGetRegBaseOffsetAndSize(struct Hdacodec *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) {
    return gpuresGetRegBaseOffsetAndSize((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Hdacodec_GpuResource.offset), pGpu, pOffset, pSize);
}

static NvBool __nvoc_thunk_RsResource_hdacodecCanCopy(struct Hdacodec *pResource) {
    return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Hdacodec_RsResource.offset));
}

static NV_STATUS __nvoc_thunk_GpuResource_hdacodecInternalControlForward(struct Hdacodec *pGpuResource, NvU32 command, void *pParams, NvU32 size) {
    return gpuresInternalControlForward((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Hdacodec_GpuResource.offset), command, pParams, size);
}

static void __nvoc_thunk_RsResource_hdacodecPreDestruct(struct Hdacodec *pResource) {
    resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Hdacodec_RsResource.offset));
}

static NV_STATUS __nvoc_thunk_RsResource_hdacodecUnmapFrom(struct Hdacodec *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
    return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Hdacodec_RsResource.offset), pParams);
}

static void __nvoc_thunk_RmResource_hdacodecControl_Epilogue(struct Hdacodec *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_Hdacodec_RmResource.offset), pCallContext, pParams);
}

static NV_STATUS __nvoc_thunk_RsResource_hdacodecControlLookup(struct Hdacodec *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) {
    return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Hdacodec_RsResource.offset), pParams, ppEntry);
}

static NV_STATUS __nvoc_thunk_GpuResource_hdacodecMap(struct Hdacodec *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) {
    return gpuresMap((struct GpuResource *)(((unsigned char *)pGpuResource) + __nvoc_rtti_Hdacodec_GpuResource.offset), pCallContext, pParams, pCpuMapping);
}

static NvBool __nvoc_thunk_RmResource_hdacodecAccessCallback(struct Hdacodec *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
    return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_Hdacodec_RmResource.offset), pInvokingClient, pAllocParams, accessRight);
}

const struct NVOC_EXPORT_INFO __nvoc_export_info_Hdacodec =
{
    /*numEntries=*/ 0,
    /*pExportEntries=*/ 0
};

void __nvoc_dtor_GpuResource(GpuResource*);
void __nvoc_dtor_Hdacodec(Hdacodec *pThis) {
    __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource);
    PORT_UNREFERENCED_VARIABLE(pThis);
}

void __nvoc_init_dataField_Hdacodec(Hdacodec *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);
}

NV_STATUS __nvoc_ctor_GpuResource(GpuResource* , struct CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
NV_STATUS __nvoc_ctor_Hdacodec(Hdacodec *pThis, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
    NV_STATUS status = NV_OK;
    status = __nvoc_ctor_GpuResource(&pThis->__nvoc_base_GpuResource, arg_pCallContext, arg_pParams);
    if (status != NV_OK) goto __nvoc_ctor_Hdacodec_fail_GpuResource;
    __nvoc_init_dataField_Hdacodec(pThis);

    status = __nvoc_hdacodecConstruct(pThis, arg_pCallContext, arg_pParams);
    if (status != NV_OK) goto __nvoc_ctor_Hdacodec_fail__init;
    goto __nvoc_ctor_Hdacodec_exit; // Success

__nvoc_ctor_Hdacodec_fail__init:
    __nvoc_dtor_GpuResource(&pThis->__nvoc_base_GpuResource);
__nvoc_ctor_Hdacodec_fail_GpuResource:
__nvoc_ctor_Hdacodec_exit:

    return status;
}

static void __nvoc_init_funcTable_Hdacodec_1(Hdacodec *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);

    pThis->__hdacodecShareCallback__ = &__nvoc_thunk_GpuResource_hdacodecShareCallback;
    pThis->__hdacodecControl__ = &__nvoc_thunk_GpuResource_hdacodecControl;
    pThis->__hdacodecUnmap__ = &__nvoc_thunk_GpuResource_hdacodecUnmap;
    pThis->__hdacodecGetMemInterMapParams__ = &__nvoc_thunk_RmResource_hdacodecGetMemInterMapParams;
    pThis->__hdacodecGetMemoryMappingDescriptor__ = &__nvoc_thunk_RmResource_hdacodecGetMemoryMappingDescriptor;
    pThis->__hdacodecGetMapAddrSpace__ = &__nvoc_thunk_GpuResource_hdacodecGetMapAddrSpace;
    pThis->__hdacodecGetInternalObjectHandle__ = &__nvoc_thunk_GpuResource_hdacodecGetInternalObjectHandle;
    pThis->__hdacodecControlFilter__ = &__nvoc_thunk_RsResource_hdacodecControlFilter;
    pThis->__hdacodecAddAdditionalDependants__ = &__nvoc_thunk_RsResource_hdacodecAddAdditionalDependants;
    pThis->__hdacodecGetRefCount__ = &__nvoc_thunk_RsResource_hdacodecGetRefCount;
    pThis->__hdacodecCheckMemInterUnmap__ = &__nvoc_thunk_RmResource_hdacodecCheckMemInterUnmap;
    pThis->__hdacodecMapTo__ = &__nvoc_thunk_RsResource_hdacodecMapTo;
    pThis->__hdacodecControl_Prologue__ = &__nvoc_thunk_RmResource_hdacodecControl_Prologue;
    pThis->__hdacodecGetRegBaseOffsetAndSize__ = &__nvoc_thunk_GpuResource_hdacodecGetRegBaseOffsetAndSize;
    pThis->__hdacodecCanCopy__ = &__nvoc_thunk_RsResource_hdacodecCanCopy;
    pThis->__hdacodecInternalControlForward__ = &__nvoc_thunk_GpuResource_hdacodecInternalControlForward;
    pThis->__hdacodecPreDestruct__ = &__nvoc_thunk_RsResource_hdacodecPreDestruct;
    pThis->__hdacodecUnmapFrom__ = &__nvoc_thunk_RsResource_hdacodecUnmapFrom;
    pThis->__hdacodecControl_Epilogue__ = &__nvoc_thunk_RmResource_hdacodecControl_Epilogue;
    pThis->__hdacodecControlLookup__ = &__nvoc_thunk_RsResource_hdacodecControlLookup;
    pThis->__hdacodecMap__ = &__nvoc_thunk_GpuResource_hdacodecMap;
    pThis->__hdacodecAccessCallback__ = &__nvoc_thunk_RmResource_hdacodecAccessCallback;
}

void __nvoc_init_funcTable_Hdacodec(Hdacodec *pThis) {
    __nvoc_init_funcTable_Hdacodec_1(pThis);
}

void __nvoc_init_GpuResource(GpuResource*);
void __nvoc_init_Hdacodec(Hdacodec *pThis) {
    pThis->__nvoc_pbase_Hdacodec = pThis;
    pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object;
    pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource;
    pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon;
    pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource;
    pThis->__nvoc_pbase_GpuResource = &pThis->__nvoc_base_GpuResource;
    __nvoc_init_GpuResource(&pThis->__nvoc_base_GpuResource);
    __nvoc_init_funcTable_Hdacodec(pThis);
}

NV_STATUS __nvoc_objCreate_Hdacodec(Hdacodec **ppThis, Dynamic *pParent, NvU32 createFlags, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
    NV_STATUS status;
    Object *pParentObj;
    Hdacodec *pThis;

    pThis = portMemAllocNonPaged(sizeof(Hdacodec));
    if (pThis == NULL) return NV_ERR_NO_MEMORY;

    portMemSet(pThis, 0, sizeof(Hdacodec));

    __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_Hdacodec);

    if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
    {
        pParentObj = dynamicCast(pParent, Object);
        objAddChild(pParentObj, &pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object);
    }
    else
    {
        pThis->__nvoc_base_GpuResource.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL;
    }

    __nvoc_init_Hdacodec(pThis);
    status = __nvoc_ctor_Hdacodec(pThis, arg_pCallContext, arg_pParams);
    if (status != NV_OK) goto __nvoc_objCreate_Hdacodec_cleanup;

    *ppThis = pThis;
    return NV_OK;

__nvoc_objCreate_Hdacodec_cleanup:
    // do not call destructors here since the constructor already called them
    portMemFree(pThis);
    return status;
}

NV_STATUS __nvoc_objCreateDynamic_Hdacodec(Hdacodec **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
    NV_STATUS status;
    struct CALL_CONTEXT * arg_pCallContext = va_arg(args, struct CALL_CONTEXT *);
    struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *);

    status = __nvoc_objCreate_Hdacodec(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams);

    return status;
}
@@ -1,229 +0,0 @@
#ifndef _G_HDA_CODEC_API_NVOC_H_
#define _G_HDA_CODEC_API_NVOC_H_
#include "nvoc/runtime.h"

#ifdef __cplusplus
extern "C" {
#endif

/*
 * SPDX-FileCopyrightText: Copyright (c) 2016-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "g_hda_codec_api_nvoc.h"

#ifndef HDA_CODEC_API_H
#define HDA_CODEC_API_H

#include "resserv/resserv.h"
#include "nvoc/prelude.h"
#include "resserv/rs_resource.h"
#include "ctrl/ctrl90ec.h"
#include "gpu/gpu_resource.h"

#ifdef NVOC_HDA_CODEC_API_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif

struct Hdacodec {
    const struct NVOC_RTTI *__nvoc_rtti;
    struct GpuResource __nvoc_base_GpuResource;
    struct Object *__nvoc_pbase_Object;
    struct RsResource *__nvoc_pbase_RsResource;
    struct RmResourceCommon *__nvoc_pbase_RmResourceCommon;
    struct RmResource *__nvoc_pbase_RmResource;
    struct GpuResource *__nvoc_pbase_GpuResource;
    struct Hdacodec *__nvoc_pbase_Hdacodec;
    NvBool (*__hdacodecShareCallback__)(struct Hdacodec *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *);
    NV_STATUS (*__hdacodecControl__)(struct Hdacodec *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
    NV_STATUS (*__hdacodecUnmap__)(struct Hdacodec *, struct CALL_CONTEXT *, struct RsCpuMapping *);
    NV_STATUS (*__hdacodecGetMemInterMapParams__)(struct Hdacodec *, RMRES_MEM_INTER_MAP_PARAMS *);
    NV_STATUS (*__hdacodecGetMemoryMappingDescriptor__)(struct Hdacodec *, struct MEMORY_DESCRIPTOR **);
    NV_STATUS (*__hdacodecGetMapAddrSpace__)(struct Hdacodec *, struct CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *);
    NvHandle (*__hdacodecGetInternalObjectHandle__)(struct Hdacodec *);
    NV_STATUS (*__hdacodecControlFilter__)(struct Hdacodec *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
    void (*__hdacodecAddAdditionalDependants__)(struct RsClient *, struct Hdacodec *, RsResourceRef *);
    NvU32 (*__hdacodecGetRefCount__)(struct Hdacodec *);
    NV_STATUS (*__hdacodecCheckMemInterUnmap__)(struct Hdacodec *, NvBool);
    NV_STATUS (*__hdacodecMapTo__)(struct Hdacodec *, RS_RES_MAP_TO_PARAMS *);
    NV_STATUS (*__hdacodecControl_Prologue__)(struct Hdacodec *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
    NV_STATUS (*__hdacodecGetRegBaseOffsetAndSize__)(struct Hdacodec *, struct OBJGPU *, NvU32 *, NvU32 *);
    NvBool (*__hdacodecCanCopy__)(struct Hdacodec *);
    NV_STATUS (*__hdacodecInternalControlForward__)(struct Hdacodec *, NvU32, void *, NvU32);
    void (*__hdacodecPreDestruct__)(struct Hdacodec *);
    NV_STATUS (*__hdacodecUnmapFrom__)(struct Hdacodec *, RS_RES_UNMAP_FROM_PARAMS *);
    void (*__hdacodecControl_Epilogue__)(struct Hdacodec *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
    NV_STATUS (*__hdacodecControlLookup__)(struct Hdacodec *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **);
    NV_STATUS (*__hdacodecMap__)(struct Hdacodec *, struct CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, struct RsCpuMapping *);
    NvBool (*__hdacodecAccessCallback__)(struct Hdacodec *, struct RsClient *, void *, RsAccessRight);
};

#ifndef __NVOC_CLASS_Hdacodec_TYPEDEF__
#define __NVOC_CLASS_Hdacodec_TYPEDEF__
typedef struct Hdacodec Hdacodec;
#endif /* __NVOC_CLASS_Hdacodec_TYPEDEF__ */

#ifndef __nvoc_class_id_Hdacodec
#define __nvoc_class_id_Hdacodec 0xf59a20
#endif /* __nvoc_class_id_Hdacodec */

extern const struct NVOC_CLASS_DEF __nvoc_class_def_Hdacodec;

#define __staticCast_Hdacodec(pThis) \
    ((pThis)->__nvoc_pbase_Hdacodec)

#ifdef __nvoc_hda_codec_api_h_disabled
#define __dynamicCast_Hdacodec(pThis) ((Hdacodec*)NULL)
#else //__nvoc_hda_codec_api_h_disabled
#define __dynamicCast_Hdacodec(pThis) \
    ((Hdacodec*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(Hdacodec)))
#endif //__nvoc_hda_codec_api_h_disabled


NV_STATUS __nvoc_objCreateDynamic_Hdacodec(Hdacodec**, Dynamic*, NvU32, va_list);

NV_STATUS __nvoc_objCreate_Hdacodec(Hdacodec**, Dynamic*, NvU32, struct CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams);
#define __objCreate_Hdacodec(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \
__nvoc_objCreate_Hdacodec((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams)
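
/*
 * Editor's note: a hedged usage sketch of the convenience macro above; the
 * surrounding variables (pParentObj, pCallContext, pAllocParams) are
 * hypothetical:
 *
 *     Hdacodec *pHdacodec = NULL;
 *     NV_STATUS status = __objCreate_Hdacodec(&pHdacodec, pParentObj, 0,
 *                                             pCallContext, pAllocParams);
 *     if (status != NV_OK)
 *         return status;
 */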

#define hdacodecShareCallback(pGpuResource, pInvokingClient, pParentRef, pSharePolicy) hdacodecShareCallback_DISPATCH(pGpuResource, pInvokingClient, pParentRef, pSharePolicy)
#define hdacodecControl(pGpuResource, pCallContext, pParams) hdacodecControl_DISPATCH(pGpuResource, pCallContext, pParams)
#define hdacodecUnmap(pGpuResource, pCallContext, pCpuMapping) hdacodecUnmap_DISPATCH(pGpuResource, pCallContext, pCpuMapping)
#define hdacodecGetMemInterMapParams(pRmResource, pParams) hdacodecGetMemInterMapParams_DISPATCH(pRmResource, pParams)
#define hdacodecGetMemoryMappingDescriptor(pRmResource, ppMemDesc) hdacodecGetMemoryMappingDescriptor_DISPATCH(pRmResource, ppMemDesc)
#define hdacodecGetMapAddrSpace(pGpuResource, pCallContext, mapFlags, pAddrSpace) hdacodecGetMapAddrSpace_DISPATCH(pGpuResource, pCallContext, mapFlags, pAddrSpace)
#define hdacodecGetInternalObjectHandle(pGpuResource) hdacodecGetInternalObjectHandle_DISPATCH(pGpuResource)
#define hdacodecControlFilter(pResource, pCallContext, pParams) hdacodecControlFilter_DISPATCH(pResource, pCallContext, pParams)
#define hdacodecAddAdditionalDependants(pClient, pResource, pReference) hdacodecAddAdditionalDependants_DISPATCH(pClient, pResource, pReference)
#define hdacodecGetRefCount(pResource) hdacodecGetRefCount_DISPATCH(pResource)
#define hdacodecCheckMemInterUnmap(pRmResource, bSubdeviceHandleProvided) hdacodecCheckMemInterUnmap_DISPATCH(pRmResource, bSubdeviceHandleProvided)
#define hdacodecMapTo(pResource, pParams) hdacodecMapTo_DISPATCH(pResource, pParams)
#define hdacodecControl_Prologue(pResource, pCallContext, pParams) hdacodecControl_Prologue_DISPATCH(pResource, pCallContext, pParams)
#define hdacodecGetRegBaseOffsetAndSize(pGpuResource, pGpu, pOffset, pSize) hdacodecGetRegBaseOffsetAndSize_DISPATCH(pGpuResource, pGpu, pOffset, pSize)
#define hdacodecCanCopy(pResource) hdacodecCanCopy_DISPATCH(pResource)
#define hdacodecInternalControlForward(pGpuResource, command, pParams, size) hdacodecInternalControlForward_DISPATCH(pGpuResource, command, pParams, size)
#define hdacodecPreDestruct(pResource) hdacodecPreDestruct_DISPATCH(pResource)
#define hdacodecUnmapFrom(pResource, pParams) hdacodecUnmapFrom_DISPATCH(pResource, pParams)
#define hdacodecControl_Epilogue(pResource, pCallContext, pParams) hdacodecControl_Epilogue_DISPATCH(pResource, pCallContext, pParams)
#define hdacodecControlLookup(pResource, pParams, ppEntry) hdacodecControlLookup_DISPATCH(pResource, pParams, ppEntry)
#define hdacodecMap(pGpuResource, pCallContext, pParams, pCpuMapping) hdacodecMap_DISPATCH(pGpuResource, pCallContext, pParams, pCpuMapping)
#define hdacodecAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) hdacodecAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight)

static inline NvBool hdacodecShareCallback_DISPATCH(struct Hdacodec *pGpuResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
    return pGpuResource->__hdacodecShareCallback__(pGpuResource, pInvokingClient, pParentRef, pSharePolicy);
}
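
/*
 * Editor's note: every *_DISPATCH inline below is a manual virtual call -- it
 * forwards through the per-instance function pointer installed by
 * __nvoc_init_funcTable_Hdacodec(), so an override simply repoints the
 * corresponding __hdacodec*__ member before the call site runs.
 */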

static inline NV_STATUS hdacodecControl_DISPATCH(struct Hdacodec *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return pGpuResource->__hdacodecControl__(pGpuResource, pCallContext, pParams);
}

static inline NV_STATUS hdacodecUnmap_DISPATCH(struct Hdacodec *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RsCpuMapping *pCpuMapping) {
    return pGpuResource->__hdacodecUnmap__(pGpuResource, pCallContext, pCpuMapping);
}

static inline NV_STATUS hdacodecGetMemInterMapParams_DISPATCH(struct Hdacodec *pRmResource, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
    return pRmResource->__hdacodecGetMemInterMapParams__(pRmResource, pParams);
}

static inline NV_STATUS hdacodecGetMemoryMappingDescriptor_DISPATCH(struct Hdacodec *pRmResource, struct MEMORY_DESCRIPTOR **ppMemDesc) {
    return pRmResource->__hdacodecGetMemoryMappingDescriptor__(pRmResource, ppMemDesc);
}

static inline NV_STATUS hdacodecGetMapAddrSpace_DISPATCH(struct Hdacodec *pGpuResource, struct CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) {
    return pGpuResource->__hdacodecGetMapAddrSpace__(pGpuResource, pCallContext, mapFlags, pAddrSpace);
}

static inline NvHandle hdacodecGetInternalObjectHandle_DISPATCH(struct Hdacodec *pGpuResource) {
    return pGpuResource->__hdacodecGetInternalObjectHandle__(pGpuResource);
}

static inline NV_STATUS hdacodecControlFilter_DISPATCH(struct Hdacodec *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return pResource->__hdacodecControlFilter__(pResource, pCallContext, pParams);
}

static inline void hdacodecAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct Hdacodec *pResource, RsResourceRef *pReference) {
    pResource->__hdacodecAddAdditionalDependants__(pClient, pResource, pReference);
}

static inline NvU32 hdacodecGetRefCount_DISPATCH(struct Hdacodec *pResource) {
    return pResource->__hdacodecGetRefCount__(pResource);
}

static inline NV_STATUS hdacodecCheckMemInterUnmap_DISPATCH(struct Hdacodec *pRmResource, NvBool bSubdeviceHandleProvided) {
    return pRmResource->__hdacodecCheckMemInterUnmap__(pRmResource, bSubdeviceHandleProvided);
}

static inline NV_STATUS hdacodecMapTo_DISPATCH(struct Hdacodec *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
    return pResource->__hdacodecMapTo__(pResource, pParams);
}

static inline NV_STATUS hdacodecControl_Prologue_DISPATCH(struct Hdacodec *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return pResource->__hdacodecControl_Prologue__(pResource, pCallContext, pParams);
}

static inline NV_STATUS hdacodecGetRegBaseOffsetAndSize_DISPATCH(struct Hdacodec *pGpuResource, struct OBJGPU *pGpu, NvU32 *pOffset, NvU32 *pSize) {
    return pGpuResource->__hdacodecGetRegBaseOffsetAndSize__(pGpuResource, pGpu, pOffset, pSize);
}

static inline NvBool hdacodecCanCopy_DISPATCH(struct Hdacodec *pResource) {
    return pResource->__hdacodecCanCopy__(pResource);
}

static inline NV_STATUS hdacodecInternalControlForward_DISPATCH(struct Hdacodec *pGpuResource, NvU32 command, void *pParams, NvU32 size) {
    return pGpuResource->__hdacodecInternalControlForward__(pGpuResource, command, pParams, size);
}

static inline void hdacodecPreDestruct_DISPATCH(struct Hdacodec *pResource) {
    pResource->__hdacodecPreDestruct__(pResource);
}

static inline NV_STATUS hdacodecUnmapFrom_DISPATCH(struct Hdacodec *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
    return pResource->__hdacodecUnmapFrom__(pResource, pParams);
}

static inline void hdacodecControl_Epilogue_DISPATCH(struct Hdacodec *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    pResource->__hdacodecControl_Epilogue__(pResource, pCallContext, pParams);
|
||||
}
|
||||
|
||||
static inline NV_STATUS hdacodecControlLookup_DISPATCH(struct Hdacodec *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) {
|
||||
return pResource->__hdacodecControlLookup__(pResource, pParams, ppEntry);
|
||||
}
|
||||
|
||||
static inline NV_STATUS hdacodecMap_DISPATCH(struct Hdacodec *pGpuResource, struct CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, struct RsCpuMapping *pCpuMapping) {
|
||||
return pGpuResource->__hdacodecMap__(pGpuResource, pCallContext, pParams, pCpuMapping);
|
||||
}
|
||||
|
||||
static inline NvBool hdacodecAccessCallback_DISPATCH(struct Hdacodec *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
|
||||
return pResource->__hdacodecAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight);
|
||||
}
|
||||
|
||||
NV_STATUS hdacodecConstruct_IMPL(struct Hdacodec *arg_pHdacodecApi, struct CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams);
|
||||
#define __nvoc_hdacodecConstruct(arg_pHdacodecApi, arg_pCallContext, arg_pParams) hdacodecConstruct_IMPL(arg_pHdacodecApi, arg_pCallContext, arg_pParams)
|
||||
#undef PRIVATE_FIELD
|
||||
|
||||
|
||||
#endif
|
||||
|
||||
#ifdef __cplusplus
|
||||
} // extern "C"
|
||||
#endif
|
||||
#endif // _G_HDA_CODEC_API_NVOC_H_
|
||||
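/*
 * Editor's sketch (illustrative, not generated code): the hdacodec* macros
 * above are NVOC's stand-in for C++ virtual dispatch. Each public name
 * expands to a _DISPATCH inline, which calls through a per-object function
 * pointer installed at construction time. The caller below is hypothetical;
 * it only assumes an already-constructed Hdacodec object.
 */
static NvU32 hdacodecExampleGetRefCount(struct Hdacodec *pHdacodec)
{
    /* Expands to hdacodecGetRefCount_DISPATCH(pHdacodec), i.e. a call
     * through pHdacodec->__hdacodecGetRefCount__. */
    return hdacodecGetRefCount(pHdacodec);
}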
@@ -1,235 +0,0 @@
#define NVOC_IO_VASPACE_H_PRIVATE_ACCESS_ALLOWED
#include "nvoc/runtime.h"
#include "nvoc/rtti.h"
#include "nvtypes.h"
#include "nvport/nvport.h"
#include "nvport/inline/util_valist.h"
#include "utils/nvassert.h"
#include "g_io_vaspace_nvoc.h"

#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0x28ed9c = 1;
#endif

extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJIOVASPACE;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJVASPACE;

void __nvoc_init_OBJIOVASPACE(OBJIOVASPACE*);
void __nvoc_init_funcTable_OBJIOVASPACE(OBJIOVASPACE*);
NV_STATUS __nvoc_ctor_OBJIOVASPACE(OBJIOVASPACE*);
void __nvoc_init_dataField_OBJIOVASPACE(OBJIOVASPACE*);
void __nvoc_dtor_OBJIOVASPACE(OBJIOVASPACE*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJIOVASPACE;

static const struct NVOC_RTTI __nvoc_rtti_OBJIOVASPACE_OBJIOVASPACE = {
    /*pClassDef=*/ &__nvoc_class_def_OBJIOVASPACE,
    /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJIOVASPACE,
    /*offset=*/ 0,
};

static const struct NVOC_RTTI __nvoc_rtti_OBJIOVASPACE_Object = {
    /*pClassDef=*/ &__nvoc_class_def_Object,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(OBJIOVASPACE, __nvoc_base_OBJVASPACE.__nvoc_base_Object),
};

static const struct NVOC_RTTI __nvoc_rtti_OBJIOVASPACE_OBJVASPACE = {
    /*pClassDef=*/ &__nvoc_class_def_OBJVASPACE,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(OBJIOVASPACE, __nvoc_base_OBJVASPACE),
};

static const struct NVOC_CASTINFO __nvoc_castinfo_OBJIOVASPACE = {
    /*numRelatives=*/ 3,
    /*relatives=*/ {
        &__nvoc_rtti_OBJIOVASPACE_OBJIOVASPACE,
        &__nvoc_rtti_OBJIOVASPACE_OBJVASPACE,
        &__nvoc_rtti_OBJIOVASPACE_Object,
    },
};

const struct NVOC_CLASS_DEF __nvoc_class_def_OBJIOVASPACE =
{
    /*classInfo=*/ {
        /*size=*/ sizeof(OBJIOVASPACE),
        /*classId=*/ classId(OBJIOVASPACE),
        /*providerId=*/ &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
        /*name=*/ "OBJIOVASPACE",
#endif
    },
    /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_OBJIOVASPACE,
    /*pCastInfo=*/ &__nvoc_castinfo_OBJIOVASPACE,
    /*pExportInfo=*/ &__nvoc_export_info_OBJIOVASPACE
};

static NV_STATUS __nvoc_thunk_OBJIOVASPACE_vaspaceConstruct_(struct OBJVASPACE *pVAS, NvU32 classId, NvU32 vaspaceId, NvU64 vaStart, NvU64 vaLimit, NvU64 vaStartInternal, NvU64 vaLimitInternal, NvU32 flags) {
    return iovaspaceConstruct_((struct OBJIOVASPACE *)(((unsigned char *)pVAS) - __nvoc_rtti_OBJIOVASPACE_OBJVASPACE.offset), classId, vaspaceId, vaStart, vaLimit, vaStartInternal, vaLimitInternal, flags);
}

static NV_STATUS __nvoc_thunk_OBJIOVASPACE_vaspaceAlloc(struct OBJVASPACE *pVAS, NvU64 size, NvU64 align, NvU64 rangeLo, NvU64 rangeHi, NvU64 pageSizeLockMask, VAS_ALLOC_FLAGS flags, NvU64 *pAddr) {
    return iovaspaceAlloc((struct OBJIOVASPACE *)(((unsigned char *)pVAS) - __nvoc_rtti_OBJIOVASPACE_OBJVASPACE.offset), size, align, rangeLo, rangeHi, pageSizeLockMask, flags, pAddr);
}

static NV_STATUS __nvoc_thunk_OBJIOVASPACE_vaspaceFree(struct OBJVASPACE *pVAS, NvU64 vAddr) {
    return iovaspaceFree((struct OBJIOVASPACE *)(((unsigned char *)pVAS) - __nvoc_rtti_OBJIOVASPACE_OBJVASPACE.offset), vAddr);
}

static NV_STATUS __nvoc_thunk_OBJIOVASPACE_vaspaceApplyDefaultAlignment(struct OBJVASPACE *pVAS, const FB_ALLOC_INFO *pAllocInfo, NvU64 *pAlign, NvU64 *pSize, NvU64 *pPageSizeLockMask) {
    return iovaspaceApplyDefaultAlignment((struct OBJIOVASPACE *)(((unsigned char *)pVAS) - __nvoc_rtti_OBJIOVASPACE_OBJVASPACE.offset), pAllocInfo, pAlign, pSize, pPageSizeLockMask);
}

static NV_STATUS __nvoc_thunk_OBJIOVASPACE_vaspaceIncAllocRefCnt(struct OBJVASPACE *pVAS, NvU64 vAddr) {
    return iovaspaceIncAllocRefCnt((struct OBJIOVASPACE *)(((unsigned char *)pVAS) - __nvoc_rtti_OBJIOVASPACE_OBJVASPACE.offset), vAddr);
}

static NvU64 __nvoc_thunk_OBJIOVASPACE_vaspaceGetVaStart(struct OBJVASPACE *pVAS) {
    return iovaspaceGetVaStart((struct OBJIOVASPACE *)(((unsigned char *)pVAS) - __nvoc_rtti_OBJIOVASPACE_OBJVASPACE.offset));
}

static NvU64 __nvoc_thunk_OBJIOVASPACE_vaspaceGetVaLimit(struct OBJVASPACE *pVAS) {
    return iovaspaceGetVaLimit((struct OBJIOVASPACE *)(((unsigned char *)pVAS) - __nvoc_rtti_OBJIOVASPACE_OBJVASPACE.offset));
}

static NV_STATUS __nvoc_thunk_OBJIOVASPACE_vaspaceGetVasInfo(struct OBJVASPACE *pVAS, NV0080_CTRL_DMA_ADV_SCHED_GET_VA_CAPS_PARAMS *pParams) {
    return iovaspaceGetVasInfo((struct OBJIOVASPACE *)(((unsigned char *)pVAS) - __nvoc_rtti_OBJIOVASPACE_OBJVASPACE.offset), pParams);
}

static NvBool __nvoc_thunk_OBJVASPACE_iovaspaceIsInternalVaRestricted(struct OBJIOVASPACE *pVAS) {
    return vaspaceIsInternalVaRestricted((struct OBJVASPACE *)(((unsigned char *)pVAS) + __nvoc_rtti_OBJIOVASPACE_OBJVASPACE.offset));
}

static NvU32 __nvoc_thunk_OBJVASPACE_iovaspaceGetFlags(struct OBJIOVASPACE *pVAS) {
    return vaspaceGetFlags((struct OBJVASPACE *)(((unsigned char *)pVAS) + __nvoc_rtti_OBJIOVASPACE_OBJVASPACE.offset));
}

const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJIOVASPACE =
{
    /*numEntries=*/ 0,
    /*pExportEntries=*/ 0
};

void __nvoc_dtor_OBJVASPACE(OBJVASPACE*);
void __nvoc_dtor_OBJIOVASPACE(OBJIOVASPACE *pThis) {
    __nvoc_iovaspaceDestruct(pThis);
    __nvoc_dtor_OBJVASPACE(&pThis->__nvoc_base_OBJVASPACE);
    PORT_UNREFERENCED_VARIABLE(pThis);
}

void __nvoc_init_dataField_OBJIOVASPACE(OBJIOVASPACE *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);
}

NV_STATUS __nvoc_ctor_OBJVASPACE(OBJVASPACE* );
NV_STATUS __nvoc_ctor_OBJIOVASPACE(OBJIOVASPACE *pThis) {
    NV_STATUS status = NV_OK;
    status = __nvoc_ctor_OBJVASPACE(&pThis->__nvoc_base_OBJVASPACE);
    if (status != NV_OK) goto __nvoc_ctor_OBJIOVASPACE_fail_OBJVASPACE;
    __nvoc_init_dataField_OBJIOVASPACE(pThis);
    goto __nvoc_ctor_OBJIOVASPACE_exit; // Success

__nvoc_ctor_OBJIOVASPACE_fail_OBJVASPACE:
__nvoc_ctor_OBJIOVASPACE_exit:

    return status;
}

static void __nvoc_init_funcTable_OBJIOVASPACE_1(OBJIOVASPACE *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);

    pThis->__iovaspaceConstruct___ = &iovaspaceConstruct__IMPL;

    pThis->__iovaspaceAlloc__ = &iovaspaceAlloc_IMPL;

    pThis->__iovaspaceFree__ = &iovaspaceFree_IMPL;

    pThis->__iovaspaceApplyDefaultAlignment__ = &iovaspaceApplyDefaultAlignment_IMPL;

    pThis->__iovaspaceIncAllocRefCnt__ = &iovaspaceIncAllocRefCnt_IMPL;

    pThis->__iovaspaceGetVaStart__ = &iovaspaceGetVaStart_IMPL;

    pThis->__iovaspaceGetVaLimit__ = &iovaspaceGetVaLimit_IMPL;

    pThis->__iovaspaceGetVasInfo__ = &iovaspaceGetVasInfo_IMPL;

    pThis->__nvoc_base_OBJVASPACE.__vaspaceConstruct___ = &__nvoc_thunk_OBJIOVASPACE_vaspaceConstruct_;

    pThis->__nvoc_base_OBJVASPACE.__vaspaceAlloc__ = &__nvoc_thunk_OBJIOVASPACE_vaspaceAlloc;

    pThis->__nvoc_base_OBJVASPACE.__vaspaceFree__ = &__nvoc_thunk_OBJIOVASPACE_vaspaceFree;

    pThis->__nvoc_base_OBJVASPACE.__vaspaceApplyDefaultAlignment__ = &__nvoc_thunk_OBJIOVASPACE_vaspaceApplyDefaultAlignment;

    pThis->__nvoc_base_OBJVASPACE.__vaspaceIncAllocRefCnt__ = &__nvoc_thunk_OBJIOVASPACE_vaspaceIncAllocRefCnt;

    pThis->__nvoc_base_OBJVASPACE.__vaspaceGetVaStart__ = &__nvoc_thunk_OBJIOVASPACE_vaspaceGetVaStart;

    pThis->__nvoc_base_OBJVASPACE.__vaspaceGetVaLimit__ = &__nvoc_thunk_OBJIOVASPACE_vaspaceGetVaLimit;

    pThis->__nvoc_base_OBJVASPACE.__vaspaceGetVasInfo__ = &__nvoc_thunk_OBJIOVASPACE_vaspaceGetVasInfo;

    pThis->__iovaspaceIsInternalVaRestricted__ = &__nvoc_thunk_OBJVASPACE_iovaspaceIsInternalVaRestricted;

    pThis->__iovaspaceGetFlags__ = &__nvoc_thunk_OBJVASPACE_iovaspaceGetFlags;
}

void __nvoc_init_funcTable_OBJIOVASPACE(OBJIOVASPACE *pThis) {
    __nvoc_init_funcTable_OBJIOVASPACE_1(pThis);
}

void __nvoc_init_OBJVASPACE(OBJVASPACE*);
void __nvoc_init_OBJIOVASPACE(OBJIOVASPACE *pThis) {
    pThis->__nvoc_pbase_OBJIOVASPACE = pThis;
    pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_OBJVASPACE.__nvoc_base_Object;
    pThis->__nvoc_pbase_OBJVASPACE = &pThis->__nvoc_base_OBJVASPACE;
    __nvoc_init_OBJVASPACE(&pThis->__nvoc_base_OBJVASPACE);
    __nvoc_init_funcTable_OBJIOVASPACE(pThis);
}

NV_STATUS __nvoc_objCreate_OBJIOVASPACE(OBJIOVASPACE **ppThis, Dynamic *pParent, NvU32 createFlags) {
    NV_STATUS status;
    Object *pParentObj;
    OBJIOVASPACE *pThis;

    pThis = portMemAllocNonPaged(sizeof(OBJIOVASPACE));
    if (pThis == NULL) return NV_ERR_NO_MEMORY;

    portMemSet(pThis, 0, sizeof(OBJIOVASPACE));

    __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_OBJIOVASPACE);

    if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
    {
        pParentObj = dynamicCast(pParent, Object);
        objAddChild(pParentObj, &pThis->__nvoc_base_OBJVASPACE.__nvoc_base_Object);
    }
    else
    {
        pThis->__nvoc_base_OBJVASPACE.__nvoc_base_Object.pParent = NULL;
    }

    __nvoc_init_OBJIOVASPACE(pThis);
    status = __nvoc_ctor_OBJIOVASPACE(pThis);
    if (status != NV_OK) goto __nvoc_objCreate_OBJIOVASPACE_cleanup;

    *ppThis = pThis;
    return NV_OK;

__nvoc_objCreate_OBJIOVASPACE_cleanup:
    // do not call destructors here since the constructor already called them
    portMemFree(pThis);
    return status;
}

NV_STATUS __nvoc_objCreateDynamic_OBJIOVASPACE(OBJIOVASPACE **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
    NV_STATUS status;

    status = __nvoc_objCreate_OBJIOVASPACE(ppThis, pParent, createFlags);

    return status;
}
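/*
 * Editor's sketch (illustrative): every thunk above recovers the derived
 * object from an embedded-base pointer with container-of arithmetic; the
 * subtracted value, __nvoc_rtti_OBJIOVASPACE_OBJVASPACE.offset, is exactly
 * NV_OFFSETOF(OBJIOVASPACE, __nvoc_base_OBJVASPACE). A minimal standalone
 * demo of the same idiom, with hypothetical Base/Derived types:
 */
#include <stddef.h>

struct Base    { int b; };
struct Derived { int d; struct Base base; };

static struct Derived *derivedFromBase(struct Base *pBase)
{
    /* Subtract the embedded member's offset, as the thunks do; adding the
     * same offset performs the opposite (derived-to-base) conversion. */
    return (struct Derived *)((unsigned char *)pBase - offsetof(struct Derived, base));
}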
@@ -1,346 +0,0 @@
#define NVOC_KERN_DISP_H_PRIVATE_ACCESS_ALLOWED
#include "nvoc/runtime.h"
#include "nvoc/rtti.h"
#include "nvtypes.h"
#include "nvport/nvport.h"
#include "nvport/inline/util_valist.h"
#include "utils/nvassert.h"
#include "g_kern_disp_nvoc.h"

#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0x55952e = 1;
#endif

extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelDisplay;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJENGSTATE;

void __nvoc_init_KernelDisplay(KernelDisplay*, RmHalspecOwner* );
void __nvoc_init_funcTable_KernelDisplay(KernelDisplay*, RmHalspecOwner* );
NV_STATUS __nvoc_ctor_KernelDisplay(KernelDisplay*, RmHalspecOwner* );
void __nvoc_init_dataField_KernelDisplay(KernelDisplay*, RmHalspecOwner* );
void __nvoc_dtor_KernelDisplay(KernelDisplay*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelDisplay;

static const struct NVOC_RTTI __nvoc_rtti_KernelDisplay_KernelDisplay = {
    /*pClassDef=*/ &__nvoc_class_def_KernelDisplay,
    /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_KernelDisplay,
    /*offset=*/ 0,
};

static const struct NVOC_RTTI __nvoc_rtti_KernelDisplay_Object = {
    /*pClassDef=*/ &__nvoc_class_def_Object,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(KernelDisplay, __nvoc_base_OBJENGSTATE.__nvoc_base_Object),
};

static const struct NVOC_RTTI __nvoc_rtti_KernelDisplay_OBJENGSTATE = {
    /*pClassDef=*/ &__nvoc_class_def_OBJENGSTATE,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(KernelDisplay, __nvoc_base_OBJENGSTATE),
};

static const struct NVOC_CASTINFO __nvoc_castinfo_KernelDisplay = {
    /*numRelatives=*/ 3,
    /*relatives=*/ {
        &__nvoc_rtti_KernelDisplay_KernelDisplay,
        &__nvoc_rtti_KernelDisplay_OBJENGSTATE,
        &__nvoc_rtti_KernelDisplay_Object,
    },
};

const struct NVOC_CLASS_DEF __nvoc_class_def_KernelDisplay =
{
    /*classInfo=*/ {
        /*size=*/ sizeof(KernelDisplay),
        /*classId=*/ classId(KernelDisplay),
        /*providerId=*/ &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
        /*name=*/ "KernelDisplay",
#endif
    },
    /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_KernelDisplay,
    /*pCastInfo=*/ &__nvoc_castinfo_KernelDisplay,
    /*pExportInfo=*/ &__nvoc_export_info_KernelDisplay
};

static NV_STATUS __nvoc_thunk_KernelDisplay_engstateConstructEngine(struct OBJGPU *pGpu, struct OBJENGSTATE *pKernelDisplay, ENGDESCRIPTOR engDesc) {
    return kdispConstructEngine(pGpu, (struct KernelDisplay *)(((unsigned char *)pKernelDisplay) - __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset), engDesc);
}

static NV_STATUS __nvoc_thunk_KernelDisplay_engstateStatePreInitLocked(struct OBJGPU *pGpu, struct OBJENGSTATE *pKernelDisplay) {
    return kdispStatePreInitLocked(pGpu, (struct KernelDisplay *)(((unsigned char *)pKernelDisplay) - __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset));
}

static NV_STATUS __nvoc_thunk_KernelDisplay_engstateStateInitLocked(struct OBJGPU *pGpu, struct OBJENGSTATE *pKernelDisplay) {
    return kdispStateInitLocked(pGpu, (struct KernelDisplay *)(((unsigned char *)pKernelDisplay) - __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset));
}

static void __nvoc_thunk_KernelDisplay_engstateStateDestroy(struct OBJGPU *pGpu, struct OBJENGSTATE *pKernelDisplay) {
    kdispStateDestroy(pGpu, (struct KernelDisplay *)(((unsigned char *)pKernelDisplay) - __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset));
}

static NV_STATUS __nvoc_thunk_KernelDisplay_engstateStateLoad(struct OBJGPU *pGpu, struct OBJENGSTATE *pKernelDisplay, NvU32 flags) {
    return kdispStateLoad(pGpu, (struct KernelDisplay *)(((unsigned char *)pKernelDisplay) - __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset), flags);
}

static NV_STATUS __nvoc_thunk_KernelDisplay_engstateStateUnload(struct OBJGPU *pGpu, struct OBJENGSTATE *pKernelDisplay, NvU32 flags) {
    return kdispStateUnload(pGpu, (struct KernelDisplay *)(((unsigned char *)pKernelDisplay) - __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset), flags);
}

static NV_STATUS __nvoc_thunk_OBJENGSTATE_kdispReconcileTunableState(POBJGPU pGpu, struct KernelDisplay *pEngstate, void *pTunableState) {
    return engstateReconcileTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset), pTunableState);
}

static NV_STATUS __nvoc_thunk_OBJENGSTATE_kdispStatePreLoad(POBJGPU pGpu, struct KernelDisplay *pEngstate, NvU32 arg0) {
    return engstateStatePreLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset), arg0);
}

static NV_STATUS __nvoc_thunk_OBJENGSTATE_kdispStatePostUnload(POBJGPU pGpu, struct KernelDisplay *pEngstate, NvU32 arg0) {
    return engstateStatePostUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset), arg0);
}

static NV_STATUS __nvoc_thunk_OBJENGSTATE_kdispStatePreUnload(POBJGPU pGpu, struct KernelDisplay *pEngstate, NvU32 arg0) {
    return engstateStatePreUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset), arg0);
}

static NV_STATUS __nvoc_thunk_OBJENGSTATE_kdispStateInitUnlocked(POBJGPU pGpu, struct KernelDisplay *pEngstate) {
    return engstateStateInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset));
}

static void __nvoc_thunk_OBJENGSTATE_kdispInitMissing(POBJGPU pGpu, struct KernelDisplay *pEngstate) {
    engstateInitMissing(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset));
}

static NV_STATUS __nvoc_thunk_OBJENGSTATE_kdispStatePreInitUnlocked(POBJGPU pGpu, struct KernelDisplay *pEngstate) {
    return engstateStatePreInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset));
}

static NV_STATUS __nvoc_thunk_OBJENGSTATE_kdispGetTunableState(POBJGPU pGpu, struct KernelDisplay *pEngstate, void *pTunableState) {
    return engstateGetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset), pTunableState);
}

static NV_STATUS __nvoc_thunk_OBJENGSTATE_kdispCompareTunableState(POBJGPU pGpu, struct KernelDisplay *pEngstate, void *pTunables1, void *pTunables2) {
    return engstateCompareTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset), pTunables1, pTunables2);
}

static void __nvoc_thunk_OBJENGSTATE_kdispFreeTunableState(POBJGPU pGpu, struct KernelDisplay *pEngstate, void *pTunableState) {
    engstateFreeTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset), pTunableState);
}

static NV_STATUS __nvoc_thunk_OBJENGSTATE_kdispStatePostLoad(POBJGPU pGpu, struct KernelDisplay *pEngstate, NvU32 arg0) {
    return engstateStatePostLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset), arg0);
}

static NV_STATUS __nvoc_thunk_OBJENGSTATE_kdispAllocTunableState(POBJGPU pGpu, struct KernelDisplay *pEngstate, void **ppTunableState) {
    return engstateAllocTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset), ppTunableState);
}

static NV_STATUS __nvoc_thunk_OBJENGSTATE_kdispSetTunableState(POBJGPU pGpu, struct KernelDisplay *pEngstate, void *pTunableState) {
    return engstateSetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset), pTunableState);
}

static NvBool __nvoc_thunk_OBJENGSTATE_kdispIsPresent(POBJGPU pGpu, struct KernelDisplay *pEngstate) {
    return engstateIsPresent(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_KernelDisplay_OBJENGSTATE.offset));
}

const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelDisplay =
{
    /*numEntries=*/ 0,
    /*pExportEntries=*/ 0
};

void __nvoc_dtor_OBJENGSTATE(OBJENGSTATE*);
void __nvoc_dtor_KernelDisplay(KernelDisplay *pThis) {
    __nvoc_kdispDestruct(pThis);
    __nvoc_dtor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE);
    PORT_UNREFERENCED_VARIABLE(pThis);
}

void __nvoc_init_dataField_KernelDisplay(KernelDisplay *pThis, RmHalspecOwner *pRmhalspecowner) {
    RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal;
    const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx;
    ChipHal *chipHal = &pRmhalspecowner->chipHal;
    const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx;
    DispIpHal *dispIpHal = &pRmhalspecowner->dispIpHal;
    const unsigned long dispIpHal_HalVarIdx = (unsigned long)dispIpHal->__nvoc_HalVarIdx;
    PORT_UNREFERENCED_VARIABLE(pThis);
    PORT_UNREFERENCED_VARIABLE(pRmhalspecowner);
    PORT_UNREFERENCED_VARIABLE(rmVariantHal);
    PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx);
    PORT_UNREFERENCED_VARIABLE(chipHal);
    PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx);
    PORT_UNREFERENCED_VARIABLE(dispIpHal);
    PORT_UNREFERENCED_VARIABLE(dispIpHal_HalVarIdx);

    // NVOC Property Hal field -- PDB_PROP_KDISP_IS_MISSING
    if (0)
    {
    }
    else if (( ((rmVariantHal_HalVarIdx >> 5) == 0UL) && ((1UL << (rmVariantHal_HalVarIdx & 0x1f)) & 0x00000002UL) )) /* RmVariantHal: PF_KERNEL_ONLY */
    {
        pThis->setProperty(pThis, PDB_PROP_KDISP_IS_MISSING, ((NvBool)(0 != 0)));
    }

    // NVOC Property Hal field -- PDB_PROP_KDISP_IMP_ENABLE
    if (( ((chipHal_HalVarIdx >> 5) == 2UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00010000UL) )) /* ChipHal: T234D */
    {
        pThis->setProperty(pThis, PDB_PROP_KDISP_IMP_ENABLE, ((NvBool)(0 == 0)));
    }
    // default
    else
    {
        pThis->setProperty(pThis, PDB_PROP_KDISP_IMP_ENABLE, ((NvBool)(0 != 0)));
    }

    pThis->pStaticInfo = ((void *)0);

    pThis->bWarPurgeSatellitesOnCoreFree = ((NvBool)(0 != 0));
}

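/*
 * Editor's note (worked example derived only from the expressions above):
 * a HAL variant index idx is tested against a bitmask table one 32-bit
 * word at a time: (idx >> 5) selects the word and (1UL << (idx & 0x1f))
 * selects the bit within it. The T234D check, word 2 with mask 0x00010000,
 * therefore matches exactly idx == (2 * 32) + 16 == 80:
 *
 *     idx = 80:  (80 >> 5) == 2,  (1UL << (80 & 0x1f)) == 0x00010000
 *
 * so only that variant takes the PDB_PROP_KDISP_IMP_ENABLE == NV_TRUE
 * path; every other chip falls through to the default (NV_FALSE) branch.
 */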
NV_STATUS __nvoc_ctor_OBJENGSTATE(OBJENGSTATE* );
NV_STATUS __nvoc_ctor_KernelDisplay(KernelDisplay *pThis, RmHalspecOwner *pRmhalspecowner) {
    NV_STATUS status = NV_OK;
    status = __nvoc_ctor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE);
    if (status != NV_OK) goto __nvoc_ctor_KernelDisplay_fail_OBJENGSTATE;
    __nvoc_init_dataField_KernelDisplay(pThis, pRmhalspecowner);
    goto __nvoc_ctor_KernelDisplay_exit; // Success

__nvoc_ctor_KernelDisplay_fail_OBJENGSTATE:
__nvoc_ctor_KernelDisplay_exit:

    return status;
}

static void __nvoc_init_funcTable_KernelDisplay_1(KernelDisplay *pThis, RmHalspecOwner *pRmhalspecowner) {
    RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal;
    const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx;
    ChipHal *chipHal = &pRmhalspecowner->chipHal;
    const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx;
    DispIpHal *dispIpHal = &pRmhalspecowner->dispIpHal;
    const unsigned long dispIpHal_HalVarIdx = (unsigned long)dispIpHal->__nvoc_HalVarIdx;
    PORT_UNREFERENCED_VARIABLE(pThis);
    PORT_UNREFERENCED_VARIABLE(pRmhalspecowner);
    PORT_UNREFERENCED_VARIABLE(rmVariantHal);
    PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx);
    PORT_UNREFERENCED_VARIABLE(chipHal);
    PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx);
    PORT_UNREFERENCED_VARIABLE(dispIpHal);
    PORT_UNREFERENCED_VARIABLE(dispIpHal_HalVarIdx);

    pThis->__kdispConstructEngine__ = &kdispConstructEngine_IMPL;

    pThis->__kdispStatePreInitLocked__ = &kdispStatePreInitLocked_IMPL;

    pThis->__kdispStateInitLocked__ = &kdispStateInitLocked_IMPL;

    pThis->__kdispStateDestroy__ = &kdispStateDestroy_IMPL;

    pThis->__kdispStateLoad__ = &kdispStateLoad_IMPL;

    pThis->__kdispStateUnload__ = &kdispStateUnload_IMPL;

    pThis->__nvoc_base_OBJENGSTATE.__engstateConstructEngine__ = &__nvoc_thunk_KernelDisplay_engstateConstructEngine;

    pThis->__nvoc_base_OBJENGSTATE.__engstateStatePreInitLocked__ = &__nvoc_thunk_KernelDisplay_engstateStatePreInitLocked;

    pThis->__nvoc_base_OBJENGSTATE.__engstateStateInitLocked__ = &__nvoc_thunk_KernelDisplay_engstateStateInitLocked;

    pThis->__nvoc_base_OBJENGSTATE.__engstateStateDestroy__ = &__nvoc_thunk_KernelDisplay_engstateStateDestroy;

    pThis->__nvoc_base_OBJENGSTATE.__engstateStateLoad__ = &__nvoc_thunk_KernelDisplay_engstateStateLoad;

    pThis->__nvoc_base_OBJENGSTATE.__engstateStateUnload__ = &__nvoc_thunk_KernelDisplay_engstateStateUnload;

    pThis->__kdispReconcileTunableState__ = &__nvoc_thunk_OBJENGSTATE_kdispReconcileTunableState;

    pThis->__kdispStatePreLoad__ = &__nvoc_thunk_OBJENGSTATE_kdispStatePreLoad;

    pThis->__kdispStatePostUnload__ = &__nvoc_thunk_OBJENGSTATE_kdispStatePostUnload;

    pThis->__kdispStatePreUnload__ = &__nvoc_thunk_OBJENGSTATE_kdispStatePreUnload;

    pThis->__kdispStateInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_kdispStateInitUnlocked;

    pThis->__kdispInitMissing__ = &__nvoc_thunk_OBJENGSTATE_kdispInitMissing;

    pThis->__kdispStatePreInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_kdispStatePreInitUnlocked;

    pThis->__kdispGetTunableState__ = &__nvoc_thunk_OBJENGSTATE_kdispGetTunableState;

    pThis->__kdispCompareTunableState__ = &__nvoc_thunk_OBJENGSTATE_kdispCompareTunableState;

    pThis->__kdispFreeTunableState__ = &__nvoc_thunk_OBJENGSTATE_kdispFreeTunableState;

    pThis->__kdispStatePostLoad__ = &__nvoc_thunk_OBJENGSTATE_kdispStatePostLoad;

    pThis->__kdispAllocTunableState__ = &__nvoc_thunk_OBJENGSTATE_kdispAllocTunableState;

    pThis->__kdispSetTunableState__ = &__nvoc_thunk_OBJENGSTATE_kdispSetTunableState;

    pThis->__kdispIsPresent__ = &__nvoc_thunk_OBJENGSTATE_kdispIsPresent;
}

void __nvoc_init_funcTable_KernelDisplay(KernelDisplay *pThis, RmHalspecOwner *pRmhalspecowner) {
    __nvoc_init_funcTable_KernelDisplay_1(pThis, pRmhalspecowner);
}

void __nvoc_init_OBJENGSTATE(OBJENGSTATE*);
void __nvoc_init_KernelDisplay(KernelDisplay *pThis, RmHalspecOwner *pRmhalspecowner) {
    pThis->__nvoc_pbase_KernelDisplay = pThis;
    pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object;
    pThis->__nvoc_pbase_OBJENGSTATE = &pThis->__nvoc_base_OBJENGSTATE;
    __nvoc_init_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE);
    __nvoc_init_funcTable_KernelDisplay(pThis, pRmhalspecowner);
}

NV_STATUS __nvoc_objCreate_KernelDisplay(KernelDisplay **ppThis, Dynamic *pParent, NvU32 createFlags) {
    NV_STATUS status;
    Object *pParentObj;
    KernelDisplay *pThis;
    RmHalspecOwner *pRmhalspecowner;

    pThis = portMemAllocNonPaged(sizeof(KernelDisplay));
    if (pThis == NULL) return NV_ERR_NO_MEMORY;

    portMemSet(pThis, 0, sizeof(KernelDisplay));

    __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_KernelDisplay);

    if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
    {
        pParentObj = dynamicCast(pParent, Object);
        objAddChild(pParentObj, &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object);
    }
    else
    {
        pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object.pParent = NULL;
    }

    if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL)
        pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent);
    NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT);

    __nvoc_init_KernelDisplay(pThis, pRmhalspecowner);
    status = __nvoc_ctor_KernelDisplay(pThis, pRmhalspecowner);
    if (status != NV_OK) goto __nvoc_objCreate_KernelDisplay_cleanup;

    *ppThis = pThis;
    return NV_OK;

__nvoc_objCreate_KernelDisplay_cleanup:
    // do not call destructors here since the constructor already called them
    portMemFree(pThis);
    return status;
}

NV_STATUS __nvoc_objCreateDynamic_KernelDisplay(KernelDisplay **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
    NV_STATUS status;

    status = __nvoc_objCreate_KernelDisplay(ppThis, pParent, createFlags);

    return status;
}
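/*
 * Editor's sketch (hypothetical caller, not part of the diff): creation
 * always flows through __nvoc_objCreate_KernelDisplay, which allocates and
 * zeroes the object, wires the RTTI and function table, locates an
 * RmHalspecOwner on the parent chain, and only then runs the constructor.
 */
static NV_STATUS exampleCreateKernelDisplay(Dynamic *pParent, KernelDisplay **ppKernelDisplay)
{
    /* Fails with NV_ERR_INVALID_ARGUMENT when no ancestor owns halspecs. */
    return __nvoc_objCreate_KernelDisplay(ppKernelDisplay, pParent, 0 /* createFlags */);
}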
@@ -1,642 +0,0 @@
#ifndef _G_KERN_DISP_NVOC_H_
#define _G_KERN_DISP_NVOC_H_
#include "nvoc/runtime.h"

#ifdef __cplusplus
extern "C" {
#endif

/*
 * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "g_kern_disp_nvoc.h"

#ifndef KERN_DISP_H
#define KERN_DISP_H

/******************************************************************************
*
*       Kernel Display module header
*       This file contains functions managing display on CPU RM
*
******************************************************************************/

#include "gpu/eng_state.h"
#include "gpu/gpu_halspec.h"
#include "gpu/disp/kern_disp_type.h"
#include "gpu/disp/kern_disp_max.h"
#include "gpu/mem_mgr/context_dma.h"
#include "gpu/disp/vblank_callback/vblank.h"

#include "ctrl/ctrl2080/ctrl2080internal.h"

typedef NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS KernelDisplayStaticInfo;

struct DispChannel;

#ifndef __NVOC_CLASS_DispChannel_TYPEDEF__
#define __NVOC_CLASS_DispChannel_TYPEDEF__
typedef struct DispChannel DispChannel;
#endif /* __NVOC_CLASS_DispChannel_TYPEDEF__ */

#ifndef __nvoc_class_id_DispChannel
#define __nvoc_class_id_DispChannel 0xbd2ff3
#endif /* __nvoc_class_id_DispChannel */


struct RgLineCallback;

#ifndef __NVOC_CLASS_RgLineCallback_TYPEDEF__
#define __NVOC_CLASS_RgLineCallback_TYPEDEF__
typedef struct RgLineCallback RgLineCallback;
#endif /* __NVOC_CLASS_RgLineCallback_TYPEDEF__ */

#ifndef __nvoc_class_id_RgLineCallback
#define __nvoc_class_id_RgLineCallback 0xa3ff1c
#endif /* __nvoc_class_id_RgLineCallback */


#define KDISP_GET_HEAD(pKernelDisplay, headID) (RMCFG_MODULE_KERNEL_HEAD ? kdispGetHead(pKernelDisplay, headID) : NULL)

/*!
 * KernelDisp is a logical abstraction of the GPU Display Engine. The
 * Public API of the Display Engine is exposed through this object, and any
 * interfaces which do not manage the underlying Display hardware can be
 * managed by this object.
 */
#ifdef NVOC_KERN_DISP_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
struct KernelDisplay {
    const struct NVOC_RTTI *__nvoc_rtti;
    struct OBJENGSTATE __nvoc_base_OBJENGSTATE;
    struct Object *__nvoc_pbase_Object;
    struct OBJENGSTATE *__nvoc_pbase_OBJENGSTATE;
    struct KernelDisplay *__nvoc_pbase_KernelDisplay;
    NV_STATUS (*__kdispConstructEngine__)(struct OBJGPU *, struct KernelDisplay *, ENGDESCRIPTOR);
    NV_STATUS (*__kdispStatePreInitLocked__)(struct OBJGPU *, struct KernelDisplay *);
    NV_STATUS (*__kdispStateInitLocked__)(struct OBJGPU *, struct KernelDisplay *);
    void (*__kdispStateDestroy__)(struct OBJGPU *, struct KernelDisplay *);
    NV_STATUS (*__kdispStateLoad__)(struct OBJGPU *, struct KernelDisplay *, NvU32);
    NV_STATUS (*__kdispStateUnload__)(struct OBJGPU *, struct KernelDisplay *, NvU32);
    NV_STATUS (*__kdispReconcileTunableState__)(POBJGPU, struct KernelDisplay *, void *);
    NV_STATUS (*__kdispStatePreLoad__)(POBJGPU, struct KernelDisplay *, NvU32);
    NV_STATUS (*__kdispStatePostUnload__)(POBJGPU, struct KernelDisplay *, NvU32);
    NV_STATUS (*__kdispStatePreUnload__)(POBJGPU, struct KernelDisplay *, NvU32);
    NV_STATUS (*__kdispStateInitUnlocked__)(POBJGPU, struct KernelDisplay *);
    void (*__kdispInitMissing__)(POBJGPU, struct KernelDisplay *);
    NV_STATUS (*__kdispStatePreInitUnlocked__)(POBJGPU, struct KernelDisplay *);
    NV_STATUS (*__kdispGetTunableState__)(POBJGPU, struct KernelDisplay *, void *);
    NV_STATUS (*__kdispCompareTunableState__)(POBJGPU, struct KernelDisplay *, void *, void *);
    void (*__kdispFreeTunableState__)(POBJGPU, struct KernelDisplay *, void *);
    NV_STATUS (*__kdispStatePostLoad__)(POBJGPU, struct KernelDisplay *, NvU32);
    NV_STATUS (*__kdispAllocTunableState__)(POBJGPU, struct KernelDisplay *, void **);
    NV_STATUS (*__kdispSetTunableState__)(POBJGPU, struct KernelDisplay *, void *);
    NvBool (*__kdispIsPresent__)(POBJGPU, struct KernelDisplay *);
    NvBool PDB_PROP_KDISP_IMP_ENABLE;
    struct DisplayInstanceMemory *pInst;
    struct KernelHead *pKernelHead[4];
    const KernelDisplayStaticInfo *pStaticInfo;
    NvBool bWarPurgeSatellitesOnCoreFree;
    struct RgLineCallback *rgLineCallbackPerHead[4][2];
    NvU32 isrVblankHeads;
};

#ifndef __NVOC_CLASS_KernelDisplay_TYPEDEF__
#define __NVOC_CLASS_KernelDisplay_TYPEDEF__
typedef struct KernelDisplay KernelDisplay;
#endif /* __NVOC_CLASS_KernelDisplay_TYPEDEF__ */

#ifndef __nvoc_class_id_KernelDisplay
#define __nvoc_class_id_KernelDisplay 0x55952e
#endif /* __nvoc_class_id_KernelDisplay */

extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelDisplay;

#define __staticCast_KernelDisplay(pThis) \
    ((pThis)->__nvoc_pbase_KernelDisplay)

#ifdef __nvoc_kern_disp_h_disabled
#define __dynamicCast_KernelDisplay(pThis) ((KernelDisplay*)NULL)
#else //__nvoc_kern_disp_h_disabled
#define __dynamicCast_KernelDisplay(pThis) \
    ((KernelDisplay*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(KernelDisplay)))
#endif //__nvoc_kern_disp_h_disabled

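/*
 * Editor's sketch (illustrative): __staticCast_KernelDisplay resolves at
 * compile time through the __nvoc_pbase_KernelDisplay member, while
 * __dynamicCast_KernelDisplay walks the NVOC_CASTINFO relatives at run
 * time and yields NULL on a type mismatch, so callers can probe safely.
 * pSomeObject below is a hypothetical name:
 *
 *     KernelDisplay *pKernelDisplay = dynamicCast(pSomeObject, KernelDisplay);
 *     if (pKernelDisplay == NULL)
 *         return NV_ERR_INVALID_OBJECT;   // pSomeObject is not a KernelDisplay
 */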
#define PDB_PROP_KDISP_IS_MISSING_BASE_CAST __nvoc_base_OBJENGSTATE.
#define PDB_PROP_KDISP_IS_MISSING_BASE_NAME PDB_PROP_ENGSTATE_IS_MISSING
#define PDB_PROP_KDISP_IMP_ENABLE_BASE_CAST
#define PDB_PROP_KDISP_IMP_ENABLE_BASE_NAME PDB_PROP_KDISP_IMP_ENABLE

NV_STATUS __nvoc_objCreateDynamic_KernelDisplay(KernelDisplay**, Dynamic*, NvU32, va_list);

NV_STATUS __nvoc_objCreate_KernelDisplay(KernelDisplay**, Dynamic*, NvU32);
#define __objCreate_KernelDisplay(ppNewObj, pParent, createFlags) \
    __nvoc_objCreate_KernelDisplay((ppNewObj), staticCast((pParent), Dynamic), (createFlags))

#define kdispConstructEngine(pGpu, pKernelDisplay, engDesc) kdispConstructEngine_DISPATCH(pGpu, pKernelDisplay, engDesc)
#define kdispStatePreInitLocked(pGpu, pKernelDisplay) kdispStatePreInitLocked_DISPATCH(pGpu, pKernelDisplay)
#define kdispStateInitLocked(pGpu, pKernelDisplay) kdispStateInitLocked_DISPATCH(pGpu, pKernelDisplay)
#define kdispStateDestroy(pGpu, pKernelDisplay) kdispStateDestroy_DISPATCH(pGpu, pKernelDisplay)
#define kdispStateLoad(pGpu, pKernelDisplay, flags) kdispStateLoad_DISPATCH(pGpu, pKernelDisplay, flags)
#define kdispStateUnload(pGpu, pKernelDisplay, flags) kdispStateUnload_DISPATCH(pGpu, pKernelDisplay, flags)
#define kdispReconcileTunableState(pGpu, pEngstate, pTunableState) kdispReconcileTunableState_DISPATCH(pGpu, pEngstate, pTunableState)
#define kdispStatePreLoad(pGpu, pEngstate, arg0) kdispStatePreLoad_DISPATCH(pGpu, pEngstate, arg0)
#define kdispStatePostUnload(pGpu, pEngstate, arg0) kdispStatePostUnload_DISPATCH(pGpu, pEngstate, arg0)
#define kdispStatePreUnload(pGpu, pEngstate, arg0) kdispStatePreUnload_DISPATCH(pGpu, pEngstate, arg0)
#define kdispStateInitUnlocked(pGpu, pEngstate) kdispStateInitUnlocked_DISPATCH(pGpu, pEngstate)
#define kdispInitMissing(pGpu, pEngstate) kdispInitMissing_DISPATCH(pGpu, pEngstate)
#define kdispStatePreInitUnlocked(pGpu, pEngstate) kdispStatePreInitUnlocked_DISPATCH(pGpu, pEngstate)
#define kdispGetTunableState(pGpu, pEngstate, pTunableState) kdispGetTunableState_DISPATCH(pGpu, pEngstate, pTunableState)
#define kdispCompareTunableState(pGpu, pEngstate, pTunables1, pTunables2) kdispCompareTunableState_DISPATCH(pGpu, pEngstate, pTunables1, pTunables2)
#define kdispFreeTunableState(pGpu, pEngstate, pTunableState) kdispFreeTunableState_DISPATCH(pGpu, pEngstate, pTunableState)
#define kdispStatePostLoad(pGpu, pEngstate, arg0) kdispStatePostLoad_DISPATCH(pGpu, pEngstate, arg0)
#define kdispAllocTunableState(pGpu, pEngstate, ppTunableState) kdispAllocTunableState_DISPATCH(pGpu, pEngstate, ppTunableState)
#define kdispSetTunableState(pGpu, pEngstate, pTunableState) kdispSetTunableState_DISPATCH(pGpu, pEngstate, pTunableState)
#define kdispIsPresent(pGpu, pEngstate) kdispIsPresent_DISPATCH(pGpu, pEngstate)
NV_STATUS kdispConstructInstMem_IMPL(struct KernelDisplay *pKernelDisplay);

#ifdef __nvoc_kern_disp_h_disabled
static inline NV_STATUS kdispConstructInstMem(struct KernelDisplay *pKernelDisplay) {
    NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_disp_h_disabled
#define kdispConstructInstMem(pKernelDisplay) kdispConstructInstMem_IMPL(pKernelDisplay)
#endif //__nvoc_kern_disp_h_disabled

#define kdispConstructInstMem_HAL(pKernelDisplay) kdispConstructInstMem(pKernelDisplay)

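/*
 * Editor's note (sketch): each kdisp* entry point below repeats the guard
 * pattern just shown for kdispConstructInstMem. With
 * __nvoc_kern_disp_h_disabled defined, the name is a static inline stub
 * that asserts and returns NV_ERR_NOT_SUPPORTED (or a zero/void result);
 * otherwise it is a macro aliasing the concrete _IMPL or versioned HAL
 * routine, so a hypothetical caller compiles identically either way:
 */
static NV_STATUS exampleInitInstMem(struct KernelDisplay *pKernelDisplay)
{
    /* Same spelling in both configurations; only the expansion differs. */
    return kdispConstructInstMem(pKernelDisplay);
}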
void kdispDestructInstMem_IMPL(struct KernelDisplay *pKernelDisplay);
|
||||
|
||||
#ifdef __nvoc_kern_disp_h_disabled
|
||||
static inline void kdispDestructInstMem(struct KernelDisplay *pKernelDisplay) {
|
||||
NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!");
|
||||
}
|
||||
#else //__nvoc_kern_disp_h_disabled
|
||||
#define kdispDestructInstMem(pKernelDisplay) kdispDestructInstMem_IMPL(pKernelDisplay)
|
||||
#endif //__nvoc_kern_disp_h_disabled
|
||||
|
||||
#define kdispDestructInstMem_HAL(pKernelDisplay) kdispDestructInstMem(pKernelDisplay)
|
||||
|
||||
NV_STATUS kdispSelectClass_v03_00_KERNEL(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 swClass);
|
||||
|
||||
#ifdef __nvoc_kern_disp_h_disabled
|
||||
static inline NV_STATUS kdispSelectClass(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 swClass) {
|
||||
NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!");
|
||||
return NV_ERR_NOT_SUPPORTED;
|
||||
}
|
||||
#else //__nvoc_kern_disp_h_disabled
|
||||
#define kdispSelectClass(pGpu, pKernelDisplay, swClass) kdispSelectClass_v03_00_KERNEL(pGpu, pKernelDisplay, swClass)
|
||||
#endif //__nvoc_kern_disp_h_disabled
|
||||
|
||||
#define kdispSelectClass_HAL(pGpu, pKernelDisplay, swClass) kdispSelectClass(pGpu, pKernelDisplay, swClass)
|
||||
|
||||
NvS32 kdispGetBaseOffset_v04_02(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay);
|
||||
|
||||
#ifdef __nvoc_kern_disp_h_disabled
|
||||
static inline NvS32 kdispGetBaseOffset(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay) {
|
||||
NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!");
|
||||
return 0;
|
||||
}
|
||||
#else //__nvoc_kern_disp_h_disabled
|
||||
#define kdispGetBaseOffset(pGpu, pKernelDisplay) kdispGetBaseOffset_v04_02(pGpu, pKernelDisplay)
|
||||
#endif //__nvoc_kern_disp_h_disabled
|
||||
|
||||
#define kdispGetBaseOffset_HAL(pGpu, pKernelDisplay) kdispGetBaseOffset(pGpu, pKernelDisplay)
|
||||
|
||||
NV_STATUS kdispGetChannelNum_v03_00(struct KernelDisplay *pKernelDisplay, DISPCHNCLASS channelClass, NvU32 channelInstance, NvU32 *pChannelNum);
|
||||
|
||||
#ifdef __nvoc_kern_disp_h_disabled
|
||||
static inline NV_STATUS kdispGetChannelNum(struct KernelDisplay *pKernelDisplay, DISPCHNCLASS channelClass, NvU32 channelInstance, NvU32 *pChannelNum) {
|
||||
NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!");
|
||||
return NV_ERR_NOT_SUPPORTED;
|
||||
}
|
||||
#else //__nvoc_kern_disp_h_disabled
|
||||
#define kdispGetChannelNum(pKernelDisplay, channelClass, channelInstance, pChannelNum) kdispGetChannelNum_v03_00(pKernelDisplay, channelClass, channelInstance, pChannelNum)
|
||||
#endif //__nvoc_kern_disp_h_disabled
|
||||
|
||||
#define kdispGetChannelNum_HAL(pKernelDisplay, channelClass, channelInstance, pChannelNum) kdispGetChannelNum(pKernelDisplay, channelClass, channelInstance, pChannelNum)
|
||||
|
||||
void kdispGetDisplayCapsBaseAndSize_v03_00(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 *pOffset, NvU32 *pSize);
|
||||
|
||||
#ifdef __nvoc_kern_disp_h_disabled
|
||||
static inline void kdispGetDisplayCapsBaseAndSize(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 *pOffset, NvU32 *pSize) {
|
||||
NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!");
|
||||
}
|
||||
#else //__nvoc_kern_disp_h_disabled
|
||||
#define kdispGetDisplayCapsBaseAndSize(pGpu, pKernelDisplay, pOffset, pSize) kdispGetDisplayCapsBaseAndSize_v03_00(pGpu, pKernelDisplay, pOffset, pSize)
|
||||
#endif //__nvoc_kern_disp_h_disabled
|
||||
|
||||
#define kdispGetDisplayCapsBaseAndSize_HAL(pGpu, pKernelDisplay, pOffset, pSize) kdispGetDisplayCapsBaseAndSize(pGpu, pKernelDisplay, pOffset, pSize)
|
||||
|
||||
void kdispGetDisplaySfUserBaseAndSize_v03_00(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 *pOffset, NvU32 *pSize);
|
||||
|
||||
#ifdef __nvoc_kern_disp_h_disabled
|
||||
static inline void kdispGetDisplaySfUserBaseAndSize(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 *pOffset, NvU32 *pSize) {
|
||||
NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!");
|
||||
}
|
||||
#else //__nvoc_kern_disp_h_disabled
|
||||
#define kdispGetDisplaySfUserBaseAndSize(pGpu, pKernelDisplay, pOffset, pSize) kdispGetDisplaySfUserBaseAndSize_v03_00(pGpu, pKernelDisplay, pOffset, pSize)
|
||||
#endif //__nvoc_kern_disp_h_disabled
|
||||
|
||||
#define kdispGetDisplaySfUserBaseAndSize_HAL(pGpu, pKernelDisplay, pOffset, pSize) kdispGetDisplaySfUserBaseAndSize(pGpu, pKernelDisplay, pOffset, pSize)
|
||||
|
||||
NV_STATUS kdispGetDisplayChannelUserBaseAndSize_v03_00(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, DISPCHNCLASS channelClass, NvU32 channelInstance, NvU32 *pOffset, NvU32 *pSize);
|
||||
|
||||
#ifdef __nvoc_kern_disp_h_disabled
|
||||
static inline NV_STATUS kdispGetDisplayChannelUserBaseAndSize(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, DISPCHNCLASS channelClass, NvU32 channelInstance, NvU32 *pOffset, NvU32 *pSize) {
|
||||
NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!");
|
||||
return NV_ERR_NOT_SUPPORTED;
|
||||
}
|
||||
#else //__nvoc_kern_disp_h_disabled
|
||||
#define kdispGetDisplayChannelUserBaseAndSize(pGpu, pKernelDisplay, channelClass, channelInstance, pOffset, pSize) kdispGetDisplayChannelUserBaseAndSize_v03_00(pGpu, pKernelDisplay, channelClass, channelInstance, pOffset, pSize)
|
||||
#endif //__nvoc_kern_disp_h_disabled
|
||||
|
||||
#define kdispGetDisplayChannelUserBaseAndSize_HAL(pGpu, pKernelDisplay, channelClass, channelInstance, pOffset, pSize) kdispGetDisplayChannelUserBaseAndSize(pGpu, pKernelDisplay, channelClass, channelInstance, pOffset, pSize)
|
||||
|
||||
NV_STATUS kdispImportImpData_IMPL(struct KernelDisplay *pKernelDisplay);
|
||||
|
||||
#ifdef __nvoc_kern_disp_h_disabled
|
||||
static inline NV_STATUS kdispImportImpData(struct KernelDisplay *pKernelDisplay) {
|
||||
NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!");
|
||||
return NV_ERR_NOT_SUPPORTED;
|
||||
}
|
||||
#else //__nvoc_kern_disp_h_disabled
|
||||
#define kdispImportImpData(pKernelDisplay) kdispImportImpData_IMPL(pKernelDisplay)
|
||||
#endif //__nvoc_kern_disp_h_disabled
|
||||
|
||||
#define kdispImportImpData_HAL(pKernelDisplay) kdispImportImpData(pKernelDisplay)
|
||||
|
||||
NV_STATUS kdispArbAndAllocDisplayBandwidth_v04_02(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, enum DISPLAY_ICC_BW_CLIENT iccBwClient, NvU32 minRequiredIsoBandwidthKBPS, NvU32 minRequiredFloorBandwidthKBPS);
|
||||
|
||||
#ifdef __nvoc_kern_disp_h_disabled
|
||||
static inline NV_STATUS kdispArbAndAllocDisplayBandwidth(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, enum DISPLAY_ICC_BW_CLIENT iccBwClient, NvU32 minRequiredIsoBandwidthKBPS, NvU32 minRequiredFloorBandwidthKBPS) {
|
||||
NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!");
|
||||
return NV_ERR_NOT_SUPPORTED;
|
||||
}
|
||||
#else //__nvoc_kern_disp_h_disabled
|
||||
#define kdispArbAndAllocDisplayBandwidth(pGpu, pKernelDisplay, iccBwClient, minRequiredIsoBandwidthKBPS, minRequiredFloorBandwidthKBPS) kdispArbAndAllocDisplayBandwidth_v04_02(pGpu, pKernelDisplay, iccBwClient, minRequiredIsoBandwidthKBPS, minRequiredFloorBandwidthKBPS)
|
||||
#endif //__nvoc_kern_disp_h_disabled
|
||||
|
||||
#define kdispArbAndAllocDisplayBandwidth_HAL(pGpu, pKernelDisplay, iccBwClient, minRequiredIsoBandwidthKBPS, minRequiredFloorBandwidthKBPS) kdispArbAndAllocDisplayBandwidth(pGpu, pKernelDisplay, iccBwClient, minRequiredIsoBandwidthKBPS, minRequiredFloorBandwidthKBPS)
|
||||
|
||||
NV_STATUS kdispSetPushBufferParamsToPhysical_IMPL(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, struct DispChannel *pDispChannel, NvHandle hObjectBuffer, struct ContextDma *pBufferContextDma, NvU32 hClass, NvU32 channelInstance, DISPCHNCLASS internalDispChnClass);
|
||||
|
||||
#ifdef __nvoc_kern_disp_h_disabled
|
||||
static inline NV_STATUS kdispSetPushBufferParamsToPhysical(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, struct DispChannel *pDispChannel, NvHandle hObjectBuffer, struct ContextDma *pBufferContextDma, NvU32 hClass, NvU32 channelInstance, DISPCHNCLASS internalDispChnClass) {
|
||||
NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!");
|
||||
return NV_ERR_NOT_SUPPORTED;
|
||||
}
|
||||
#else //__nvoc_kern_disp_h_disabled
|
||||
#define kdispSetPushBufferParamsToPhysical(pGpu, pKernelDisplay, pDispChannel, hObjectBuffer, pBufferContextDma, hClass, channelInstance, internalDispChnClass) kdispSetPushBufferParamsToPhysical_IMPL(pGpu, pKernelDisplay, pDispChannel, hObjectBuffer, pBufferContextDma, hClass, channelInstance, internalDispChnClass)
|
||||
#endif //__nvoc_kern_disp_h_disabled
|
||||
|
||||
#define kdispSetPushBufferParamsToPhysical_HAL(pGpu, pKernelDisplay, pDispChannel, hObjectBuffer, pBufferContextDma, hClass, channelInstance, internalDispChnClass) kdispSetPushBufferParamsToPhysical(pGpu, pKernelDisplay, pDispChannel, hObjectBuffer, pBufferContextDma, hClass, channelInstance, internalDispChnClass)
|
||||
|
||||
static inline NV_STATUS kdispAcquireDispChannelHw_56cd7a(struct KernelDisplay *pKernelDisplay, struct DispChannel *pDispChannel, NvU32 channelInstance, NvHandle hObjectBuffer, NvU32 initialGetPutOffset, NvBool allowGrabWithinSameClient, NvBool connectPbAtGrab) {
|
||||
return NV_OK;
|
||||
}
|
||||
|
||||
#ifdef __nvoc_kern_disp_h_disabled
|
||||
static inline NV_STATUS kdispAcquireDispChannelHw(struct KernelDisplay *pKernelDisplay, struct DispChannel *pDispChannel, NvU32 channelInstance, NvHandle hObjectBuffer, NvU32 initialGetPutOffset, NvBool allowGrabWithinSameClient, NvBool connectPbAtGrab) {
|
||||
NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!");
|
||||
return NV_ERR_NOT_SUPPORTED;
|
||||
}
|
||||
#else //__nvoc_kern_disp_h_disabled
|
||||
#define kdispAcquireDispChannelHw(pKernelDisplay, pDispChannel, channelInstance, hObjectBuffer, initialGetPutOffset, allowGrabWithinSameClient, connectPbAtGrab) kdispAcquireDispChannelHw_56cd7a(pKernelDisplay, pDispChannel, channelInstance, hObjectBuffer, initialGetPutOffset, allowGrabWithinSameClient, connectPbAtGrab)
|
||||
#endif //__nvoc_kern_disp_h_disabled
|
||||
|
||||
#define kdispAcquireDispChannelHw_HAL(pKernelDisplay, pDispChannel, channelInstance, hObjectBuffer, initialGetPutOffset, allowGrabWithinSameClient, connectPbAtGrab) kdispAcquireDispChannelHw(pKernelDisplay, pDispChannel, channelInstance, hObjectBuffer, initialGetPutOffset, allowGrabWithinSameClient, connectPbAtGrab)
static inline NV_STATUS kdispReleaseDispChannelHw_56cd7a(struct KernelDisplay *pKernelDisplay, struct DispChannel *pDispChannel) {
    return NV_OK;
}

#ifdef __nvoc_kern_disp_h_disabled
static inline NV_STATUS kdispReleaseDispChannelHw(struct KernelDisplay *pKernelDisplay, struct DispChannel *pDispChannel) {
    NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_disp_h_disabled
#define kdispReleaseDispChannelHw(pKernelDisplay, pDispChannel) kdispReleaseDispChannelHw_56cd7a(pKernelDisplay, pDispChannel)
#endif //__nvoc_kern_disp_h_disabled

#define kdispReleaseDispChannelHw_HAL(pKernelDisplay, pDispChannel) kdispReleaseDispChannelHw(pKernelDisplay, pDispChannel)

NV_STATUS kdispMapDispChannel_IMPL(struct KernelDisplay *pKernelDisplay, struct DispChannel *pDispChannel);

#ifdef __nvoc_kern_disp_h_disabled
static inline NV_STATUS kdispMapDispChannel(struct KernelDisplay *pKernelDisplay, struct DispChannel *pDispChannel) {
    NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_disp_h_disabled
#define kdispMapDispChannel(pKernelDisplay, pDispChannel) kdispMapDispChannel_IMPL(pKernelDisplay, pDispChannel)
#endif //__nvoc_kern_disp_h_disabled

#define kdispMapDispChannel_HAL(pKernelDisplay, pDispChannel) kdispMapDispChannel(pKernelDisplay, pDispChannel)

void kdispUnbindUnmapDispChannel_IMPL(struct KernelDisplay *pKernelDisplay, struct DispChannel *pDispChannel);

#ifdef __nvoc_kern_disp_h_disabled
static inline void kdispUnbindUnmapDispChannel(struct KernelDisplay *pKernelDisplay, struct DispChannel *pDispChannel) {
    NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!");
}
#else //__nvoc_kern_disp_h_disabled
#define kdispUnbindUnmapDispChannel(pKernelDisplay, pDispChannel) kdispUnbindUnmapDispChannel_IMPL(pKernelDisplay, pDispChannel)
#endif //__nvoc_kern_disp_h_disabled

#define kdispUnbindUnmapDispChannel_HAL(pKernelDisplay, pDispChannel) kdispUnbindUnmapDispChannel(pKernelDisplay, pDispChannel)

NV_STATUS kdispRegisterRgLineCallback_IMPL(struct KernelDisplay *pKernelDisplay, struct RgLineCallback *pRgLineCallback, NvU32 head, NvU32 rgIntrLine, NvBool bEnable);

#ifdef __nvoc_kern_disp_h_disabled
static inline NV_STATUS kdispRegisterRgLineCallback(struct KernelDisplay *pKernelDisplay, struct RgLineCallback *pRgLineCallback, NvU32 head, NvU32 rgIntrLine, NvBool bEnable) {
    NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_disp_h_disabled
#define kdispRegisterRgLineCallback(pKernelDisplay, pRgLineCallback, head, rgIntrLine, bEnable) kdispRegisterRgLineCallback_IMPL(pKernelDisplay, pRgLineCallback, head, rgIntrLine, bEnable)
#endif //__nvoc_kern_disp_h_disabled

#define kdispRegisterRgLineCallback_HAL(pKernelDisplay, pRgLineCallback, head, rgIntrLine, bEnable) kdispRegisterRgLineCallback(pKernelDisplay, pRgLineCallback, head, rgIntrLine, bEnable)

void kdispInvokeRgLineCallback_KERNEL(struct KernelDisplay *pKernelDisplay, NvU32 head, NvU32 rgIntrLine, NvBool bIsIrqlIsr);

#ifdef __nvoc_kern_disp_h_disabled
static inline void kdispInvokeRgLineCallback(struct KernelDisplay *pKernelDisplay, NvU32 head, NvU32 rgIntrLine, NvBool bIsIrqlIsr) {
    NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!");
}
#else //__nvoc_kern_disp_h_disabled
#define kdispInvokeRgLineCallback(pKernelDisplay, head, rgIntrLine, bIsIrqlIsr) kdispInvokeRgLineCallback_KERNEL(pKernelDisplay, head, rgIntrLine, bIsIrqlIsr)
#endif //__nvoc_kern_disp_h_disabled

#define kdispInvokeRgLineCallback_HAL(pKernelDisplay, head, rgIntrLine, bIsIrqlIsr) kdispInvokeRgLineCallback(pKernelDisplay, head, rgIntrLine, bIsIrqlIsr)

void kdispServiceVblank_KERNEL(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 arg0, NvU32 arg1, THREAD_STATE_NODE *arg2);

#ifdef __nvoc_kern_disp_h_disabled
static inline void kdispServiceVblank(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 arg0, NvU32 arg1, THREAD_STATE_NODE *arg2) {
    NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!");
}
#else //__nvoc_kern_disp_h_disabled
#define kdispServiceVblank(pGpu, pKernelDisplay, arg0, arg1, arg2) kdispServiceVblank_KERNEL(pGpu, pKernelDisplay, arg0, arg1, arg2)
#endif //__nvoc_kern_disp_h_disabled

#define kdispServiceVblank_HAL(pGpu, pKernelDisplay, arg0, arg1, arg2) kdispServiceVblank(pGpu, pKernelDisplay, arg0, arg1, arg2)

NvU32 kdispReadPendingVblank_KERNEL(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, THREAD_STATE_NODE *arg0);

#ifdef __nvoc_kern_disp_h_disabled
static inline NvU32 kdispReadPendingVblank(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, THREAD_STATE_NODE *arg0) {
    NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!");
    return 0;
}
#else //__nvoc_kern_disp_h_disabled
#define kdispReadPendingVblank(pGpu, pKernelDisplay, arg0) kdispReadPendingVblank_KERNEL(pGpu, pKernelDisplay, arg0)
#endif //__nvoc_kern_disp_h_disabled

#define kdispReadPendingVblank_HAL(pGpu, pKernelDisplay, arg0) kdispReadPendingVblank(pGpu, pKernelDisplay, arg0)

static inline NvBool kdispGetVgaWorkspaceBase_ceaee8(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU64 *pOffset) {
    NV_ASSERT_PRECOMP(0);
    return ((NvBool)(0 != 0));
}

#ifdef __nvoc_kern_disp_h_disabled
static inline NvBool kdispGetVgaWorkspaceBase(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU64 *pOffset) {
    NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!");
    return NV_FALSE;
}
#else //__nvoc_kern_disp_h_disabled
#define kdispGetVgaWorkspaceBase(pGpu, pKernelDisplay, pOffset) kdispGetVgaWorkspaceBase_ceaee8(pGpu, pKernelDisplay, pOffset)
#endif //__nvoc_kern_disp_h_disabled

#define kdispGetVgaWorkspaceBase_HAL(pGpu, pKernelDisplay, pOffset) kdispGetVgaWorkspaceBase(pGpu, pKernelDisplay, pOffset)

void kdispInvokeDisplayModesetCallback_KERNEL(struct KernelDisplay *pKernelDisplay, NvBool bModesetStart, NvU32 minRequiredIsoBandwidthKBPS, NvU32 minRequiredFloorBandwidthKBPS);

#ifdef __nvoc_kern_disp_h_disabled
static inline void kdispInvokeDisplayModesetCallback(struct KernelDisplay *pKernelDisplay, NvBool bModesetStart, NvU32 minRequiredIsoBandwidthKBPS, NvU32 minRequiredFloorBandwidthKBPS) {
    NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!");
}
#else //__nvoc_kern_disp_h_disabled
#define kdispInvokeDisplayModesetCallback(pKernelDisplay, bModesetStart, minRequiredIsoBandwidthKBPS, minRequiredFloorBandwidthKBPS) kdispInvokeDisplayModesetCallback_KERNEL(pKernelDisplay, bModesetStart, minRequiredIsoBandwidthKBPS, minRequiredFloorBandwidthKBPS)
#endif //__nvoc_kern_disp_h_disabled

#define kdispInvokeDisplayModesetCallback_HAL(pKernelDisplay, bModesetStart, minRequiredIsoBandwidthKBPS, minRequiredFloorBandwidthKBPS) kdispInvokeDisplayModesetCallback(pKernelDisplay, bModesetStart, minRequiredIsoBandwidthKBPS, minRequiredFloorBandwidthKBPS)

NV_STATUS kdispConstructEngine_IMPL(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, ENGDESCRIPTOR engDesc);

static inline NV_STATUS kdispConstructEngine_DISPATCH(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, ENGDESCRIPTOR engDesc) {
    return pKernelDisplay->__kdispConstructEngine__(pGpu, pKernelDisplay, engDesc);
}
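// The _DISPATCH wrappers that follow do no work themselves: they forward
// through per-object function pointers (vtable slots such as
// __kdispConstructEngine__) that __nvoc_init_funcTable_* fills in when the
// object is created.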
NV_STATUS kdispStatePreInitLocked_IMPL(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay);

static inline NV_STATUS kdispStatePreInitLocked_DISPATCH(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay) {
    return pKernelDisplay->__kdispStatePreInitLocked__(pGpu, pKernelDisplay);
}

NV_STATUS kdispStateInitLocked_IMPL(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay);

static inline NV_STATUS kdispStateInitLocked_DISPATCH(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay) {
    return pKernelDisplay->__kdispStateInitLocked__(pGpu, pKernelDisplay);
}

void kdispStateDestroy_IMPL(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay);

static inline void kdispStateDestroy_DISPATCH(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay) {
    pKernelDisplay->__kdispStateDestroy__(pGpu, pKernelDisplay);
}

NV_STATUS kdispStateLoad_IMPL(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 flags);

static inline NV_STATUS kdispStateLoad_DISPATCH(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 flags) {
    return pKernelDisplay->__kdispStateLoad__(pGpu, pKernelDisplay, flags);
}

NV_STATUS kdispStateUnload_IMPL(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 flags);

static inline NV_STATUS kdispStateUnload_DISPATCH(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 flags) {
    return pKernelDisplay->__kdispStateUnload__(pGpu, pKernelDisplay, flags);
}

static inline NV_STATUS kdispReconcileTunableState_DISPATCH(POBJGPU pGpu, struct KernelDisplay *pEngstate, void *pTunableState) {
    return pEngstate->__kdispReconcileTunableState__(pGpu, pEngstate, pTunableState);
}

static inline NV_STATUS kdispStatePreLoad_DISPATCH(POBJGPU pGpu, struct KernelDisplay *pEngstate, NvU32 arg0) {
    return pEngstate->__kdispStatePreLoad__(pGpu, pEngstate, arg0);
}

static inline NV_STATUS kdispStatePostUnload_DISPATCH(POBJGPU pGpu, struct KernelDisplay *pEngstate, NvU32 arg0) {
    return pEngstate->__kdispStatePostUnload__(pGpu, pEngstate, arg0);
}

static inline NV_STATUS kdispStatePreUnload_DISPATCH(POBJGPU pGpu, struct KernelDisplay *pEngstate, NvU32 arg0) {
    return pEngstate->__kdispStatePreUnload__(pGpu, pEngstate, arg0);
}

static inline NV_STATUS kdispStateInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelDisplay *pEngstate) {
    return pEngstate->__kdispStateInitUnlocked__(pGpu, pEngstate);
}

static inline void kdispInitMissing_DISPATCH(POBJGPU pGpu, struct KernelDisplay *pEngstate) {
    pEngstate->__kdispInitMissing__(pGpu, pEngstate);
}

static inline NV_STATUS kdispStatePreInitUnlocked_DISPATCH(POBJGPU pGpu, struct KernelDisplay *pEngstate) {
    return pEngstate->__kdispStatePreInitUnlocked__(pGpu, pEngstate);
}

static inline NV_STATUS kdispGetTunableState_DISPATCH(POBJGPU pGpu, struct KernelDisplay *pEngstate, void *pTunableState) {
    return pEngstate->__kdispGetTunableState__(pGpu, pEngstate, pTunableState);
}

static inline NV_STATUS kdispCompareTunableState_DISPATCH(POBJGPU pGpu, struct KernelDisplay *pEngstate, void *pTunables1, void *pTunables2) {
    return pEngstate->__kdispCompareTunableState__(pGpu, pEngstate, pTunables1, pTunables2);
}

static inline void kdispFreeTunableState_DISPATCH(POBJGPU pGpu, struct KernelDisplay *pEngstate, void *pTunableState) {
    pEngstate->__kdispFreeTunableState__(pGpu, pEngstate, pTunableState);
}

static inline NV_STATUS kdispStatePostLoad_DISPATCH(POBJGPU pGpu, struct KernelDisplay *pEngstate, NvU32 arg0) {
    return pEngstate->__kdispStatePostLoad__(pGpu, pEngstate, arg0);
}

static inline NV_STATUS kdispAllocTunableState_DISPATCH(POBJGPU pGpu, struct KernelDisplay *pEngstate, void **ppTunableState) {
    return pEngstate->__kdispAllocTunableState__(pGpu, pEngstate, ppTunableState);
}

static inline NV_STATUS kdispSetTunableState_DISPATCH(POBJGPU pGpu, struct KernelDisplay *pEngstate, void *pTunableState) {
    return pEngstate->__kdispSetTunableState__(pGpu, pEngstate, pTunableState);
}

static inline NvBool kdispIsPresent_DISPATCH(POBJGPU pGpu, struct KernelDisplay *pEngstate) {
    return pEngstate->__kdispIsPresent__(pGpu, pEngstate);
}

void kdispDestruct_IMPL(struct KernelDisplay *pKernelDisplay);
#define __nvoc_kdispDestruct(pKernelDisplay) kdispDestruct_IMPL(pKernelDisplay)
NV_STATUS kdispConstructKhead_IMPL(struct KernelDisplay *pKernelDisplay);
#ifdef __nvoc_kern_disp_h_disabled
static inline NV_STATUS kdispConstructKhead(struct KernelDisplay *pKernelDisplay) {
    NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_disp_h_disabled
#define kdispConstructKhead(pKernelDisplay) kdispConstructKhead_IMPL(pKernelDisplay)
#endif //__nvoc_kern_disp_h_disabled

void kdispDestructKhead_IMPL(struct KernelDisplay *pKernelDisplay);
#ifdef __nvoc_kern_disp_h_disabled
static inline void kdispDestructKhead(struct KernelDisplay *pKernelDisplay) {
    NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!");
}
#else //__nvoc_kern_disp_h_disabled
#define kdispDestructKhead(pKernelDisplay) kdispDestructKhead_IMPL(pKernelDisplay)
#endif //__nvoc_kern_disp_h_disabled

NV_STATUS kdispGetIntChnClsForHwCls_IMPL(struct KernelDisplay *pKernelDisplay, NvU32 hwClass, DISPCHNCLASS *pDispChnClass);
#ifdef __nvoc_kern_disp_h_disabled
static inline NV_STATUS kdispGetIntChnClsForHwCls(struct KernelDisplay *pKernelDisplay, NvU32 hwClass, DISPCHNCLASS *pDispChnClass) {
    NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_kern_disp_h_disabled
#define kdispGetIntChnClsForHwCls(pKernelDisplay, hwClass, pDispChnClass) kdispGetIntChnClsForHwCls_IMPL(pKernelDisplay, hwClass, pDispChnClass)
#endif //__nvoc_kern_disp_h_disabled

void kdispNotifyEvent_IMPL(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 notifyIndex, void *pNotifyParams, NvU32 notifyParamsSize, NvV32 info32, NvV16 info16);
#ifdef __nvoc_kern_disp_h_disabled
static inline void kdispNotifyEvent(struct OBJGPU *pGpu, struct KernelDisplay *pKernelDisplay, NvU32 notifyIndex, void *pNotifyParams, NvU32 notifyParamsSize, NvV32 info32, NvV16 info16) {
    NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!");
}
#else //__nvoc_kern_disp_h_disabled
#define kdispNotifyEvent(pGpu, pKernelDisplay, notifyIndex, pNotifyParams, notifyParamsSize, info32, info16) kdispNotifyEvent_IMPL(pGpu, pKernelDisplay, notifyIndex, pNotifyParams, notifyParamsSize, info32, info16)
#endif //__nvoc_kern_disp_h_disabled

void kdispSetWarPurgeSatellitesOnCoreFree_IMPL(struct KernelDisplay *pKernelDisplay, NvBool value);
#ifdef __nvoc_kern_disp_h_disabled
static inline void kdispSetWarPurgeSatellitesOnCoreFree(struct KernelDisplay *pKernelDisplay, NvBool value) {
    NV_ASSERT_FAILED_PRECOMP("KernelDisplay was disabled!");
}
#else //__nvoc_kern_disp_h_disabled
#define kdispSetWarPurgeSatellitesOnCoreFree(pKernelDisplay, value) kdispSetWarPurgeSatellitesOnCoreFree_IMPL(pKernelDisplay, value)
#endif //__nvoc_kern_disp_h_disabled

#undef PRIVATE_FIELD

void
dispdeviceFillVgaSavedDisplayState( struct OBJGPU *pGpu,
    NvU64   vgaAddr,
    NvU8    vgaMemType,
    NvBool  vgaValid,
    NvU64   workspaceAddr,
    NvU8    workspaceMemType,
    NvBool  workspaceValid,
    NvBool  baseValid,
    NvBool  workspaceBaseValid
);

static NV_INLINE struct KernelHead*
kdispGetHead
(
    struct KernelDisplay *pKernelDisplay,
    NvU32 head
)
{
    if (head >= OBJ_MAX_HEADS)
    {
        return NULL;
    }

    return pKernelDisplay->pKernelHead[head];
}
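// kdispGetHead() above returns NULL for an out-of-range index instead of
// reading past pKernelHead[]; the accessors below instead assert that the
// cached pStaticInfo has been populated before dereferencing it.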
static NV_INLINE NvU32
kdispGetNumHeads(struct KernelDisplay *pKernelDisplay)
{
    NV_ASSERT(pKernelDisplay->pStaticInfo != NULL);
    return pKernelDisplay->pStaticInfo->numHeads;
}

static NV_INLINE NvU32
kdispGetIsPrimaryVga(struct KernelDisplay *pKernelDisplay)
{
    NV_ASSERT(pKernelDisplay->pStaticInfo != NULL);
    return pKernelDisplay->pStaticInfo->bPrimaryVga;
}

#endif // KERN_DISP_H

#ifdef __cplusplus
} // extern "C"
#endif
#endif // _G_KERN_DISP_NVOC_H_
@@ -1,176 +0,0 @@
#define NVOC_KERNEL_HEAD_H_PRIVATE_ACCESS_ALLOWED
#include "nvoc/runtime.h"
#include "nvoc/rtti.h"
#include "nvtypes.h"
#include "nvport/nvport.h"
#include "nvport/inline/util_valist.h"
#include "utils/nvassert.h"
#include "g_kernel_head_nvoc.h"

#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0x0145e6 = 1;
#endif

extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelHead;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;

void __nvoc_init_KernelHead(KernelHead*, RmHalspecOwner* );
void __nvoc_init_funcTable_KernelHead(KernelHead*, RmHalspecOwner* );
NV_STATUS __nvoc_ctor_KernelHead(KernelHead*, RmHalspecOwner* );
void __nvoc_init_dataField_KernelHead(KernelHead*, RmHalspecOwner* );
void __nvoc_dtor_KernelHead(KernelHead*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelHead;

static const struct NVOC_RTTI __nvoc_rtti_KernelHead_KernelHead = {
    /*pClassDef=*/  &__nvoc_class_def_KernelHead,
    /*dtor=*/       (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_KernelHead,
    /*offset=*/     0,
};

static const struct NVOC_RTTI __nvoc_rtti_KernelHead_Object = {
    /*pClassDef=*/  &__nvoc_class_def_Object,
    /*dtor=*/       &__nvoc_destructFromBase,
    /*offset=*/     NV_OFFSETOF(KernelHead, __nvoc_base_Object),
};

static const struct NVOC_CASTINFO __nvoc_castinfo_KernelHead = {
    /*numRelatives=*/ 2,
    /*relatives=*/ {
        &__nvoc_rtti_KernelHead_KernelHead,
        &__nvoc_rtti_KernelHead_Object,
    },
};

const struct NVOC_CLASS_DEF __nvoc_class_def_KernelHead =
{
    /*classInfo=*/ {
        /*size=*/       sizeof(KernelHead),
        /*classId=*/    classId(KernelHead),
        /*providerId=*/ &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
        /*name=*/       "KernelHead",
#endif
    },
    /*objCreatefn=*/    (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_KernelHead,
    /*pCastInfo=*/      &__nvoc_castinfo_KernelHead,
    /*pExportInfo=*/    &__nvoc_export_info_KernelHead
};

const struct NVOC_EXPORT_INFO __nvoc_export_info_KernelHead =
{
    /*numEntries=*/     0,
    /*pExportEntries=*/ 0
};

void __nvoc_dtor_Object(Object*);
void __nvoc_dtor_KernelHead(KernelHead *pThis) {
    __nvoc_dtor_Object(&pThis->__nvoc_base_Object);
    PORT_UNREFERENCED_VARIABLE(pThis);
}

void __nvoc_init_dataField_KernelHead(KernelHead *pThis, RmHalspecOwner *pRmhalspecowner) {
    RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal;
    const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx;
    ChipHal *chipHal = &pRmhalspecowner->chipHal;
    const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx;
    PORT_UNREFERENCED_VARIABLE(pThis);
    PORT_UNREFERENCED_VARIABLE(pRmhalspecowner);
    PORT_UNREFERENCED_VARIABLE(rmVariantHal);
    PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx);
    PORT_UNREFERENCED_VARIABLE(chipHal);
    PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx);
}

NV_STATUS __nvoc_ctor_Object(Object* );
NV_STATUS __nvoc_ctor_KernelHead(KernelHead *pThis, RmHalspecOwner *pRmhalspecowner) {
    NV_STATUS status = NV_OK;
    status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object);
    if (status != NV_OK) goto __nvoc_ctor_KernelHead_fail_Object;
    __nvoc_init_dataField_KernelHead(pThis, pRmhalspecowner);

    status = __nvoc_kheadConstruct(pThis);
    if (status != NV_OK) goto __nvoc_ctor_KernelHead_fail__init;
    goto __nvoc_ctor_KernelHead_exit; // Success

__nvoc_ctor_KernelHead_fail__init:
    __nvoc_dtor_Object(&pThis->__nvoc_base_Object);
__nvoc_ctor_KernelHead_fail_Object:
__nvoc_ctor_KernelHead_exit:

    return status;
}

static void __nvoc_init_funcTable_KernelHead_1(KernelHead *pThis, RmHalspecOwner *pRmhalspecowner) {
    RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal;
    const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx;
    ChipHal *chipHal = &pRmhalspecowner->chipHal;
    const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx;
    PORT_UNREFERENCED_VARIABLE(pThis);
    PORT_UNREFERENCED_VARIABLE(pRmhalspecowner);
    PORT_UNREFERENCED_VARIABLE(rmVariantHal);
    PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx);
    PORT_UNREFERENCED_VARIABLE(chipHal);
    PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx);
}

void __nvoc_init_funcTable_KernelHead(KernelHead *pThis, RmHalspecOwner *pRmhalspecowner) {
    __nvoc_init_funcTable_KernelHead_1(pThis, pRmhalspecowner);
}

void __nvoc_init_Object(Object*);
void __nvoc_init_KernelHead(KernelHead *pThis, RmHalspecOwner *pRmhalspecowner) {
    pThis->__nvoc_pbase_KernelHead = pThis;
    pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object;
    __nvoc_init_Object(&pThis->__nvoc_base_Object);
    __nvoc_init_funcTable_KernelHead(pThis, pRmhalspecowner);
}

NV_STATUS __nvoc_objCreate_KernelHead(KernelHead **ppThis, Dynamic *pParent, NvU32 createFlags) {
    NV_STATUS status;
    Object *pParentObj;
    KernelHead *pThis;
    RmHalspecOwner *pRmhalspecowner;

    pThis = portMemAllocNonPaged(sizeof(KernelHead));
    if (pThis == NULL) return NV_ERR_NO_MEMORY;

    portMemSet(pThis, 0, sizeof(KernelHead));

    __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_KernelHead);

    if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
    {
        pParentObj = dynamicCast(pParent, Object);
        objAddChild(pParentObj, &pThis->__nvoc_base_Object);
    }
    else
    {
        pThis->__nvoc_base_Object.pParent = NULL;
    }

    if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL)
        pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent);
    NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT);

    __nvoc_init_KernelHead(pThis, pRmhalspecowner);
    status = __nvoc_ctor_KernelHead(pThis, pRmhalspecowner);
    if (status != NV_OK) goto __nvoc_objCreate_KernelHead_cleanup;

    *ppThis = pThis;
    return NV_OK;

__nvoc_objCreate_KernelHead_cleanup:
    // do not call destructors here since the constructor already called them
    portMemFree(pThis);
    return status;
}
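// Creation order, as implemented above: allocate and zero the object,
// install RTTI, attach to the parent (unless PARENT_HALSPEC_ONLY is set),
// locate the RmHalspecOwner ancestor, run __nvoc_init_KernelHead(), and
// only then the constructor; on failure the zeroed allocation is freed.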
NV_STATUS __nvoc_objCreateDynamic_KernelHead(KernelHead **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
    NV_STATUS status;

    status = __nvoc_objCreate_KernelHead(ppThis, pParent, createFlags);

    return status;
}
@@ -1,354 +0,0 @@
#ifndef _G_KERNEL_HEAD_NVOC_H_
#define _G_KERNEL_HEAD_NVOC_H_
#include "nvoc/runtime.h"

#ifdef __cplusplus
extern "C" {
#endif

/*
 * SPDX-FileCopyrightText: Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**************************** Kernelhead Routines **************************\
*                                                                           *
*   Kernel head object function Definitions.                                *
*                                                                           *
\***************************************************************************/

#include "g_kernel_head_nvoc.h"

#ifndef KERNEL_HEAD_H
#define KERNEL_HEAD_H

/* ------------------------ Includes --------------------------------------- */
#include "gpu/disp/vblank_callback/vblank.h"
#include "gpu/gpu_halspec.h"
/* ------------------------ Types definitions ------------------------------ */
enum
{
    headIntr_none   = 0,
    headIntr_vblank = NVBIT(0),
};
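// NVBIT(0) is 1 << 0, so these enumerators are single-bit flags intended to
// be OR-ed and masked together; they back the IntrState field declared
// below.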

/* ------------------------ Macros & Defines ------------------------------- */

#ifdef NVOC_KERNEL_HEAD_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
struct __nvoc_inner_struc_KernelHead_1__ {
    struct {
        NvU32 Total;
        NvU32 LowLatency;
        NvU32 NormLatency;
    } Counters;
    struct {
        VBLANKCALLBACK *pListLL;
        VBLANKCALLBACK *pListNL;
    } Callback;
    NvU32 IntrState;
};

struct KernelHead {
    const struct NVOC_RTTI *__nvoc_rtti;
    struct Object __nvoc_base_Object;
    struct Object *__nvoc_pbase_Object;
    struct KernelHead *__nvoc_pbase_KernelHead;
    struct __nvoc_inner_struc_KernelHead_1__ Vblank;
    NvU32 PublicId;
};

#ifndef __NVOC_CLASS_KernelHead_TYPEDEF__
#define __NVOC_CLASS_KernelHead_TYPEDEF__
typedef struct KernelHead KernelHead;
#endif /* __NVOC_CLASS_KernelHead_TYPEDEF__ */

#ifndef __nvoc_class_id_KernelHead
#define __nvoc_class_id_KernelHead 0x0145e6
#endif /* __nvoc_class_id_KernelHead */

extern const struct NVOC_CLASS_DEF __nvoc_class_def_KernelHead;

#define __staticCast_KernelHead(pThis) \
    ((pThis)->__nvoc_pbase_KernelHead)

#ifdef __nvoc_kernel_head_h_disabled
#define __dynamicCast_KernelHead(pThis) ((KernelHead*)NULL)
#else //__nvoc_kernel_head_h_disabled
#define __dynamicCast_KernelHead(pThis) \
    ((KernelHead*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(KernelHead)))
#endif //__nvoc_kernel_head_h_disabled

NV_STATUS __nvoc_objCreateDynamic_KernelHead(KernelHead**, Dynamic*, NvU32, va_list);

NV_STATUS __nvoc_objCreate_KernelHead(KernelHead**, Dynamic*, NvU32);
#define __objCreate_KernelHead(ppNewObj, pParent, createFlags) \
__nvoc_objCreate_KernelHead((ppNewObj), staticCast((pParent), Dynamic), (createFlags))
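// Hypothetical usage sketch (not from the original source): a head object
// would typically be created through the wrapper macro, e.g.
//
//     KernelHead *pKernelHead = NULL;
//     NV_STATUS   status      = __objCreate_KernelHead(&pKernelHead, pParent, 0);
//
// where pParent must have an RmHalspecOwner ancestor, as enforced by
// __nvoc_objCreate_KernelHead() in the companion .c file.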
NvU32 kheadGetVblankTotalCounter_IMPL(struct KernelHead *pKernelHead);

#ifdef __nvoc_kernel_head_h_disabled
static inline NvU32 kheadGetVblankTotalCounter(struct KernelHead *pKernelHead) {
    NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!");
    return 0;
}
#else //__nvoc_kernel_head_h_disabled
#define kheadGetVblankTotalCounter(pKernelHead) kheadGetVblankTotalCounter_IMPL(pKernelHead)
#endif //__nvoc_kernel_head_h_disabled

#define kheadGetVblankTotalCounter_HAL(pKernelHead) kheadGetVblankTotalCounter(pKernelHead)

void kheadSetVblankTotalCounter_IMPL(struct KernelHead *pKernelHead, NvU32 arg0);

#ifdef __nvoc_kernel_head_h_disabled
static inline void kheadSetVblankTotalCounter(struct KernelHead *pKernelHead, NvU32 arg0) {
    NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!");
}
#else //__nvoc_kernel_head_h_disabled
#define kheadSetVblankTotalCounter(pKernelHead, arg0) kheadSetVblankTotalCounter_IMPL(pKernelHead, arg0)
#endif //__nvoc_kernel_head_h_disabled

#define kheadSetVblankTotalCounter_HAL(pKernelHead, arg0) kheadSetVblankTotalCounter(pKernelHead, arg0)

NvU32 kheadGetVblankLowLatencyCounter_IMPL(struct KernelHead *pKernelHead);

#ifdef __nvoc_kernel_head_h_disabled
static inline NvU32 kheadGetVblankLowLatencyCounter(struct KernelHead *pKernelHead) {
    NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!");
    return 0;
}
#else //__nvoc_kernel_head_h_disabled
#define kheadGetVblankLowLatencyCounter(pKernelHead) kheadGetVblankLowLatencyCounter_IMPL(pKernelHead)
#endif //__nvoc_kernel_head_h_disabled

#define kheadGetVblankLowLatencyCounter_HAL(pKernelHead) kheadGetVblankLowLatencyCounter(pKernelHead)

void kheadSetVblankLowLatencyCounter_IMPL(struct KernelHead *pKernelHead, NvU32 arg0);

#ifdef __nvoc_kernel_head_h_disabled
static inline void kheadSetVblankLowLatencyCounter(struct KernelHead *pKernelHead, NvU32 arg0) {
    NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!");
}
#else //__nvoc_kernel_head_h_disabled
#define kheadSetVblankLowLatencyCounter(pKernelHead, arg0) kheadSetVblankLowLatencyCounter_IMPL(pKernelHead, arg0)
#endif //__nvoc_kernel_head_h_disabled

#define kheadSetVblankLowLatencyCounter_HAL(pKernelHead, arg0) kheadSetVblankLowLatencyCounter(pKernelHead, arg0)

static inline NvU32 kheadGetVblankNormLatencyCounter_46f6a7(struct KernelHead *pKernelHead) {
    return NV_ERR_NOT_SUPPORTED;
}

#ifdef __nvoc_kernel_head_h_disabled
static inline NvU32 kheadGetVblankNormLatencyCounter(struct KernelHead *pKernelHead) {
    NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!");
    return 0;
}
#else //__nvoc_kernel_head_h_disabled
#define kheadGetVblankNormLatencyCounter(pKernelHead) kheadGetVblankNormLatencyCounter_46f6a7(pKernelHead)
#endif //__nvoc_kernel_head_h_disabled

#define kheadGetVblankNormLatencyCounter_HAL(pKernelHead) kheadGetVblankNormLatencyCounter(pKernelHead)

static inline void kheadSetVblankNormLatencyCounter_b3696a(struct KernelHead *pKernelHead, NvU32 arg0) {
    return;
}

#ifdef __nvoc_kernel_head_h_disabled
static inline void kheadSetVblankNormLatencyCounter(struct KernelHead *pKernelHead, NvU32 arg0) {
    NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!");
}
#else //__nvoc_kernel_head_h_disabled
#define kheadSetVblankNormLatencyCounter(pKernelHead, arg0) kheadSetVblankNormLatencyCounter_b3696a(pKernelHead, arg0)
#endif //__nvoc_kernel_head_h_disabled

#define kheadSetVblankNormLatencyCounter_HAL(pKernelHead, arg0) kheadSetVblankNormLatencyCounter(pKernelHead, arg0)

static inline NvBool kheadReadVblankIntrEnable_491d52(struct OBJGPU *pGpu, struct KernelHead *pKernelHead) {
    return ((NvBool)(0 != 0));
}

#ifdef __nvoc_kernel_head_h_disabled
static inline NvBool kheadReadVblankIntrEnable(struct OBJGPU *pGpu, struct KernelHead *pKernelHead) {
    NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!");
    return NV_FALSE;
}
#else //__nvoc_kernel_head_h_disabled
#define kheadReadVblankIntrEnable(pGpu, pKernelHead) kheadReadVblankIntrEnable_491d52(pGpu, pKernelHead)
#endif //__nvoc_kernel_head_h_disabled

#define kheadReadVblankIntrEnable_HAL(pGpu, pKernelHead) kheadReadVblankIntrEnable(pGpu, pKernelHead)

static inline NvBool kheadGetDisplayInitialized_491d52(struct OBJGPU *pGpu, struct KernelHead *pKernelHead) {
    return ((NvBool)(0 != 0));
}

#ifdef __nvoc_kernel_head_h_disabled
static inline NvBool kheadGetDisplayInitialized(struct OBJGPU *pGpu, struct KernelHead *pKernelHead) {
    NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!");
    return NV_FALSE;
}
#else //__nvoc_kernel_head_h_disabled
#define kheadGetDisplayInitialized(pGpu, pKernelHead) kheadGetDisplayInitialized_491d52(pGpu, pKernelHead)
#endif //__nvoc_kernel_head_h_disabled

#define kheadGetDisplayInitialized_HAL(pGpu, pKernelHead) kheadGetDisplayInitialized(pGpu, pKernelHead)

static inline void kheadWriteVblankIntrEnable_b3696a(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, NvBool arg0) {
    return;
}

#ifdef __nvoc_kernel_head_h_disabled
static inline void kheadWriteVblankIntrEnable(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, NvBool arg0) {
    NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!");
}
#else //__nvoc_kernel_head_h_disabled
#define kheadWriteVblankIntrEnable(pGpu, pKernelHead, arg0) kheadWriteVblankIntrEnable_b3696a(pGpu, pKernelHead, arg0)
#endif //__nvoc_kernel_head_h_disabled

#define kheadWriteVblankIntrEnable_HAL(pGpu, pKernelHead, arg0) kheadWriteVblankIntrEnable(pGpu, pKernelHead, arg0)

static inline void kheadProcessVblankCallbacks_e426af(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, NvU32 arg0) {
    NV_ASSERT_PRECOMP(0);
    return;
}

#ifdef __nvoc_kernel_head_h_disabled
static inline void kheadProcessVblankCallbacks(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, NvU32 arg0) {
    NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!");
}
#else //__nvoc_kernel_head_h_disabled
#define kheadProcessVblankCallbacks(pGpu, pKernelHead, arg0) kheadProcessVblankCallbacks_e426af(pGpu, pKernelHead, arg0)
#endif //__nvoc_kernel_head_h_disabled

#define kheadProcessVblankCallbacks_HAL(pGpu, pKernelHead, arg0) kheadProcessVblankCallbacks(pGpu, pKernelHead, arg0)

static inline void kheadResetPendingVblank_e426af(struct OBJGPU *pGpu, struct KernelHead *pKhead, THREAD_STATE_NODE *arg0) {
    NV_ASSERT_PRECOMP(0);
    return;
}

#ifdef __nvoc_kernel_head_h_disabled
static inline void kheadResetPendingVblank(struct OBJGPU *pGpu, struct KernelHead *pKhead, THREAD_STATE_NODE *arg0) {
    NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!");
}
#else //__nvoc_kernel_head_h_disabled
#define kheadResetPendingVblank(pGpu, pKhead, arg0) kheadResetPendingVblank_e426af(pGpu, pKhead, arg0)
#endif //__nvoc_kernel_head_h_disabled

#define kheadResetPendingVblank_HAL(pGpu, pKhead, arg0) kheadResetPendingVblank(pGpu, pKhead, arg0)

static inline void kheadResetPendingVblankForKernel_e426af(struct OBJGPU *pGpu, struct KernelHead *pKhead, THREAD_STATE_NODE *arg0) {
    NV_ASSERT_PRECOMP(0);
    return;
}

#ifdef __nvoc_kernel_head_h_disabled
static inline void kheadResetPendingVblankForKernel(struct OBJGPU *pGpu, struct KernelHead *pKhead, THREAD_STATE_NODE *arg0) {
    NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!");
}
#else //__nvoc_kernel_head_h_disabled
#define kheadResetPendingVblankForKernel(pGpu, pKhead, arg0) kheadResetPendingVblankForKernel_e426af(pGpu, pKhead, arg0)
#endif //__nvoc_kernel_head_h_disabled

#define kheadResetPendingVblankForKernel_HAL(pGpu, pKhead, arg0) kheadResetPendingVblankForKernel(pGpu, pKhead, arg0)

static inline NvU32 kheadReadPendingVblank_92bfc3(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, NvU32 intr) {
    NV_ASSERT_PRECOMP(0);
    return NV_ERR_NOT_SUPPORTED;
}

#ifdef __nvoc_kernel_head_h_disabled
static inline NvU32 kheadReadPendingVblank(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, NvU32 intr) {
    NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!");
    return 0;
}
#else //__nvoc_kernel_head_h_disabled
#define kheadReadPendingVblank(pGpu, pKernelHead, intr) kheadReadPendingVblank_92bfc3(pGpu, pKernelHead, intr)
#endif //__nvoc_kernel_head_h_disabled

#define kheadReadPendingVblank_HAL(pGpu, pKernelHead, intr) kheadReadPendingVblank(pGpu, pKernelHead, intr)

NV_STATUS kheadConstruct_IMPL(struct KernelHead *arg_pKernelHead);
#define __nvoc_kheadConstruct(arg_pKernelHead) kheadConstruct_IMPL(arg_pKernelHead)
void kheadAddVblankCallback_IMPL(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, VBLANKCALLBACK *arg0);
#ifdef __nvoc_kernel_head_h_disabled
static inline void kheadAddVblankCallback(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, VBLANKCALLBACK *arg0) {
    NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!");
}
#else //__nvoc_kernel_head_h_disabled
#define kheadAddVblankCallback(pGpu, pKernelHead, arg0) kheadAddVblankCallback_IMPL(pGpu, pKernelHead, arg0)
#endif //__nvoc_kernel_head_h_disabled

void kheadDeleteVblankCallback_IMPL(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, VBLANKCALLBACK *arg0);
#ifdef __nvoc_kernel_head_h_disabled
static inline void kheadDeleteVblankCallback(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, VBLANKCALLBACK *arg0) {
    NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!");
}
#else //__nvoc_kernel_head_h_disabled
#define kheadDeleteVblankCallback(pGpu, pKernelHead, arg0) kheadDeleteVblankCallback_IMPL(pGpu, pKernelHead, arg0)
#endif //__nvoc_kernel_head_h_disabled

NvU32 kheadCheckVblankCallbacksQueued_IMPL(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, NvU32 arg0, NvU32 *arg1);
#ifdef __nvoc_kernel_head_h_disabled
static inline NvU32 kheadCheckVblankCallbacksQueued(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, NvU32 arg0, NvU32 *arg1) {
    NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!");
    return 0;
}
#else //__nvoc_kernel_head_h_disabled
#define kheadCheckVblankCallbacksQueued(pGpu, pKernelHead, arg0, arg1) kheadCheckVblankCallbacksQueued_IMPL(pGpu, pKernelHead, arg0, arg1)
#endif //__nvoc_kernel_head_h_disabled

NvU32 kheadReadVblankIntrState_IMPL(struct OBJGPU *pGpu, struct KernelHead *pKernelHead);
#ifdef __nvoc_kernel_head_h_disabled
static inline NvU32 kheadReadVblankIntrState(struct OBJGPU *pGpu, struct KernelHead *pKernelHead) {
    NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!");
    return 0;
}
#else //__nvoc_kernel_head_h_disabled
#define kheadReadVblankIntrState(pGpu, pKernelHead) kheadReadVblankIntrState_IMPL(pGpu, pKernelHead)
#endif //__nvoc_kernel_head_h_disabled

void kheadWriteVblankIntrState_IMPL(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, NvU32 arg0);
#ifdef __nvoc_kernel_head_h_disabled
static inline void kheadWriteVblankIntrState(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, NvU32 arg0) {
    NV_ASSERT_FAILED_PRECOMP("KernelHead was disabled!");
}
#else //__nvoc_kernel_head_h_disabled
#define kheadWriteVblankIntrState(pGpu, pKernelHead, arg0) kheadWriteVblankIntrState_IMPL(pGpu, pKernelHead, arg0)
#endif //__nvoc_kernel_head_h_disabled

#undef PRIVATE_FIELD

void kheadProcessVblankCallbacks_IMPL(struct OBJGPU *pGpu, struct KernelHead *pKernelHead, NvU32 state);

#endif // KERNEL_HEAD_H

#ifdef __cplusplus
} // extern "C"
#endif
#endif // _G_KERNEL_HEAD_NVOC_H_
@@ -1,428 +0,0 @@
#define NVOC_MEM_MGR_H_PRIVATE_ACCESS_ALLOWED
#include "nvoc/runtime.h"
#include "nvoc/rtti.h"
#include "nvtypes.h"
#include "nvport/nvport.h"
#include "nvport/inline/util_valist.h"
#include "utils/nvassert.h"
#include "g_mem_mgr_nvoc.h"

#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0x22ad47 = 1;
#endif

extern const struct NVOC_CLASS_DEF __nvoc_class_def_MemoryManager;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJENGSTATE;

void __nvoc_init_MemoryManager(MemoryManager*, RmHalspecOwner* );
void __nvoc_init_funcTable_MemoryManager(MemoryManager*, RmHalspecOwner* );
NV_STATUS __nvoc_ctor_MemoryManager(MemoryManager*, RmHalspecOwner* );
void __nvoc_init_dataField_MemoryManager(MemoryManager*, RmHalspecOwner* );
void __nvoc_dtor_MemoryManager(MemoryManager*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_MemoryManager;

static const struct NVOC_RTTI __nvoc_rtti_MemoryManager_MemoryManager = {
    /*pClassDef=*/  &__nvoc_class_def_MemoryManager,
    /*dtor=*/       (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_MemoryManager,
    /*offset=*/     0,
};

static const struct NVOC_RTTI __nvoc_rtti_MemoryManager_Object = {
    /*pClassDef=*/  &__nvoc_class_def_Object,
    /*dtor=*/       &__nvoc_destructFromBase,
    /*offset=*/     NV_OFFSETOF(MemoryManager, __nvoc_base_OBJENGSTATE.__nvoc_base_Object),
};

static const struct NVOC_RTTI __nvoc_rtti_MemoryManager_OBJENGSTATE = {
    /*pClassDef=*/  &__nvoc_class_def_OBJENGSTATE,
    /*dtor=*/       &__nvoc_destructFromBase,
    /*offset=*/     NV_OFFSETOF(MemoryManager, __nvoc_base_OBJENGSTATE),
};

static const struct NVOC_CASTINFO __nvoc_castinfo_MemoryManager = {
    /*numRelatives=*/ 3,
    /*relatives=*/ {
        &__nvoc_rtti_MemoryManager_MemoryManager,
        &__nvoc_rtti_MemoryManager_OBJENGSTATE,
        &__nvoc_rtti_MemoryManager_Object,
    },
};

const struct NVOC_CLASS_DEF __nvoc_class_def_MemoryManager =
{
    /*classInfo=*/ {
        /*size=*/       sizeof(MemoryManager),
        /*classId=*/    classId(MemoryManager),
        /*providerId=*/ &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
        /*name=*/       "MemoryManager",
#endif
    },
    /*objCreatefn=*/    (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_MemoryManager,
    /*pCastInfo=*/      &__nvoc_castinfo_MemoryManager,
    /*pExportInfo=*/    &__nvoc_export_info_MemoryManager
};

static NV_STATUS __nvoc_thunk_OBJENGSTATE_memmgrReconcileTunableState(POBJGPU pGpu, struct MemoryManager *pEngstate, void *pTunableState) {
    return engstateReconcileTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset), pTunableState);
}
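// Each thunk below performs the same explicit up-cast: it offsets the
// MemoryManager pointer by the byte offset of its OBJENGSTATE base (as
// recorded in the RTTI tables above) and then calls the generic engstate*
// API; this is C's stand-in for C++ base-class dispatch.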
static NV_STATUS __nvoc_thunk_OBJENGSTATE_memmgrStateLoad(POBJGPU pGpu, struct MemoryManager *pEngstate, NvU32 arg0) {
    return engstateStateLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset), arg0);
}

static NV_STATUS __nvoc_thunk_OBJENGSTATE_memmgrStateUnload(POBJGPU pGpu, struct MemoryManager *pEngstate, NvU32 arg0) {
    return engstateStateUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset), arg0);
}

static NV_STATUS __nvoc_thunk_OBJENGSTATE_memmgrStateInitLocked(POBJGPU pGpu, struct MemoryManager *pEngstate) {
    return engstateStateInitLocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset));
}

static NV_STATUS __nvoc_thunk_OBJENGSTATE_memmgrStatePreLoad(POBJGPU pGpu, struct MemoryManager *pEngstate, NvU32 arg0) {
    return engstateStatePreLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset), arg0);
}

static NV_STATUS __nvoc_thunk_OBJENGSTATE_memmgrStatePostUnload(POBJGPU pGpu, struct MemoryManager *pEngstate, NvU32 arg0) {
    return engstateStatePostUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset), arg0);
}

static void __nvoc_thunk_OBJENGSTATE_memmgrStateDestroy(POBJGPU pGpu, struct MemoryManager *pEngstate) {
    engstateStateDestroy(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset));
}

static NV_STATUS __nvoc_thunk_OBJENGSTATE_memmgrStatePreUnload(POBJGPU pGpu, struct MemoryManager *pEngstate, NvU32 arg0) {
    return engstateStatePreUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset), arg0);
}

static NV_STATUS __nvoc_thunk_OBJENGSTATE_memmgrStateInitUnlocked(POBJGPU pGpu, struct MemoryManager *pEngstate) {
    return engstateStateInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset));
}

static void __nvoc_thunk_OBJENGSTATE_memmgrInitMissing(POBJGPU pGpu, struct MemoryManager *pEngstate) {
    engstateInitMissing(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset));
}

static NV_STATUS __nvoc_thunk_OBJENGSTATE_memmgrStatePreInitLocked(POBJGPU pGpu, struct MemoryManager *pEngstate) {
    return engstateStatePreInitLocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset));
}

static NV_STATUS __nvoc_thunk_OBJENGSTATE_memmgrStatePreInitUnlocked(POBJGPU pGpu, struct MemoryManager *pEngstate) {
    return engstateStatePreInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset));
}

static NV_STATUS __nvoc_thunk_OBJENGSTATE_memmgrGetTunableState(POBJGPU pGpu, struct MemoryManager *pEngstate, void *pTunableState) {
    return engstateGetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset), pTunableState);
}

static NV_STATUS __nvoc_thunk_OBJENGSTATE_memmgrCompareTunableState(POBJGPU pGpu, struct MemoryManager *pEngstate, void *pTunables1, void *pTunables2) {
    return engstateCompareTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset), pTunables1, pTunables2);
}

static void __nvoc_thunk_OBJENGSTATE_memmgrFreeTunableState(POBJGPU pGpu, struct MemoryManager *pEngstate, void *pTunableState) {
    engstateFreeTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset), pTunableState);
}

static NV_STATUS __nvoc_thunk_OBJENGSTATE_memmgrStatePostLoad(POBJGPU pGpu, struct MemoryManager *pEngstate, NvU32 arg0) {
    return engstateStatePostLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset), arg0);
}

static NV_STATUS __nvoc_thunk_OBJENGSTATE_memmgrAllocTunableState(POBJGPU pGpu, struct MemoryManager *pEngstate, void **ppTunableState) {
    return engstateAllocTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset), ppTunableState);
}

static NV_STATUS __nvoc_thunk_OBJENGSTATE_memmgrSetTunableState(POBJGPU pGpu, struct MemoryManager *pEngstate, void *pTunableState) {
    return engstateSetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset), pTunableState);
}

static NV_STATUS __nvoc_thunk_OBJENGSTATE_memmgrConstructEngine(POBJGPU pGpu, struct MemoryManager *pEngstate, ENGDESCRIPTOR arg0) {
    return engstateConstructEngine(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset), arg0);
}

static NvBool __nvoc_thunk_OBJENGSTATE_memmgrIsPresent(POBJGPU pGpu, struct MemoryManager *pEngstate) {
    return engstateIsPresent(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_MemoryManager_OBJENGSTATE.offset));
}

const struct NVOC_EXPORT_INFO __nvoc_export_info_MemoryManager =
{
    /*numEntries=*/     0,
    /*pExportEntries=*/ 0
};

void __nvoc_dtor_OBJENGSTATE(OBJENGSTATE*);
void __nvoc_dtor_MemoryManager(MemoryManager *pThis) {
    __nvoc_dtor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE);
    PORT_UNREFERENCED_VARIABLE(pThis);
}

void __nvoc_init_dataField_MemoryManager(MemoryManager *pThis, RmHalspecOwner *pRmhalspecowner) {
    ChipHal *chipHal = &pRmhalspecowner->chipHal;
    const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx;
    RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal;
    const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx;
    PORT_UNREFERENCED_VARIABLE(pThis);
    PORT_UNREFERENCED_VARIABLE(pRmhalspecowner);
    PORT_UNREFERENCED_VARIABLE(chipHal);
    PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx);
    PORT_UNREFERENCED_VARIABLE(rmVariantHal);
    PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx);

    // Hal field -- bFbRegionsSupported
    if (0)
    {
    }
    // default
    else
    {
        pThis->bFbRegionsSupported = ((NvBool)(0 != 0));
    }

    // Hal field -- bPmaEnabled
    if (0)
    {
    }
    // default
    else
    {
        pThis->bPmaEnabled = ((NvBool)(0 != 0));
    }

    // Hal field -- bClientPageTablesPmaManaged
    if (( ((chipHal_HalVarIdx >> 5) == 2UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00010000UL) )) /* ChipHal: T234D */
    {
        pThis->bClientPageTablesPmaManaged = ((NvBool)(0 == 0));
    }
    // default
    else
    {
        pThis->bClientPageTablesPmaManaged = ((NvBool)(0 != 0));
}
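    // Decoding the predicate above: chipHal_HalVarIdx is treated as a bit
    // index into 32-bit words, so (idx >> 5) == 2UL selects word 2 and the
    // 0x00010000UL mask tests bit 16, i.e. HAL variant 2*32 + 16 = 80, which
    // the generator labels T234D. ((NvBool)(0 == 0)) and ((NvBool)(0 != 0))
    // are simply the generated spellings of NV_TRUE and NV_FALSE.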
|
||||
|
||||
// Hal field -- bScanoutSysmem
|
||||
if (( ((chipHal_HalVarIdx >> 5) == 2UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00010000UL) )) /* ChipHal: T234D */
|
||||
{
|
||||
pThis->bScanoutSysmem = ((NvBool)(0 == 0));
|
||||
}
|
||||
// default
|
||||
else
|
||||
{
|
||||
pThis->bScanoutSysmem = ((NvBool)(0 != 0));
|
||||
}
|
||||
|
||||
// Hal field -- bDisallowSplitLowerMemory
|
||||
if (( ((chipHal_HalVarIdx >> 5) == 2UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00010000UL) )) /* ChipHal: T234D */
|
||||
{
|
||||
pThis->bDisallowSplitLowerMemory = ((NvBool)(0 == 0));
|
||||
}
|
||||
// default
|
||||
else
|
||||
{
|
||||
pThis->bDisallowSplitLowerMemory = ((NvBool)(0 != 0));
|
||||
}
|
||||
|
||||
// Hal field -- bSmallPageCompression
|
||||
if (0)
|
||||
{
|
||||
}
|
||||
// default
|
||||
else
|
||||
{
|
||||
pThis->bSmallPageCompression = ((NvBool)(0 != 0));
|
||||
}
|
||||
|
||||
// Hal field -- bSysmemCompressionSupportDef
|
||||
if (( ((chipHal_HalVarIdx >> 5) == 2UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00010000UL) )) /* ChipHal: T234D */
|
||||
{
|
||||
pThis->bSysmemCompressionSupportDef = ((NvBool)(0 == 0));
|
||||
}
|
||||
// default
|
||||
else
|
||||
{
|
||||
pThis->bSysmemCompressionSupportDef = ((NvBool)(0 != 0));
|
||||
}
|
||||
|
||||
// Hal field -- bBug2301372IncreaseRmReserveMemoryWar
|
||||
if (0)
|
||||
{
|
||||
}
|
||||
// default
|
||||
else
|
||||
{
|
||||
pThis->bBug2301372IncreaseRmReserveMemoryWar = ((NvBool)(0 != 0));
|
||||
}
|
||||
|
||||
pThis->bEnableDynamicPageOfflining = ((NvBool)(0 != 0));
|
||||
|
||||
// Hal field -- bVgpuPmaSupport
|
||||
if (0)
|
||||
{
|
||||
}
|
||||
// default
|
||||
else
|
||||
{
|
||||
pThis->bVgpuPmaSupport = ((NvBool)(0 != 0));
|
||||
}
|
||||
|
||||
// Hal field -- bAllowNoncontiguousAllocation
|
||||
if (( ((chipHal_HalVarIdx >> 5) == 2UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00010000UL) )) /* ChipHal: T234D */
|
||||
{
|
||||
pThis->bAllowNoncontiguousAllocation = ((NvBool)(0 == 0));
|
||||
}
|
||||
// default
|
||||
else
|
||||
{
|
||||
pThis->bAllowNoncontiguousAllocation = ((NvBool)(0 != 0));
|
||||
}
|
||||
|
||||
// Hal field -- bScrubOnFreeEnabled
|
||||
if (0)
|
||||
{
|
||||
}
|
||||
// default
|
||||
else
|
||||
{
|
||||
pThis->bScrubOnFreeEnabled = ((NvBool)(0 != 0));
|
||||
}
|
||||
|
||||
// Hal field -- bFastScrubberEnabled
|
||||
if (0)
|
||||
{
|
||||
}
|
||||
// default
|
||||
else
|
||||
{
|
||||
pThis->bFastScrubberEnabled = ((NvBool)(0 != 0));
|
||||
}
|
||||
}
|
||||
|
||||
NV_STATUS __nvoc_ctor_OBJENGSTATE(OBJENGSTATE* );
|
||||
NV_STATUS __nvoc_ctor_MemoryManager(MemoryManager *pThis, RmHalspecOwner *pRmhalspecowner) {
|
||||
NV_STATUS status = NV_OK;
|
||||
status = __nvoc_ctor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE);
|
||||
if (status != NV_OK) goto __nvoc_ctor_MemoryManager_fail_OBJENGSTATE;
|
||||
__nvoc_init_dataField_MemoryManager(pThis, pRmhalspecowner);
|
||||
goto __nvoc_ctor_MemoryManager_exit; // Success
|
||||
|
||||
__nvoc_ctor_MemoryManager_fail_OBJENGSTATE:
|
||||
__nvoc_ctor_MemoryManager_exit:
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
static void __nvoc_init_funcTable_MemoryManager_1(MemoryManager *pThis, RmHalspecOwner *pRmhalspecowner) {
|
||||
ChipHal *chipHal = &pRmhalspecowner->chipHal;
|
||||
const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx;
|
||||
RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal;
|
||||
const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx;
|
||||
PORT_UNREFERENCED_VARIABLE(pThis);
|
||||
PORT_UNREFERENCED_VARIABLE(pRmhalspecowner);
|
||||
PORT_UNREFERENCED_VARIABLE(chipHal);
|
||||
    PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx);
    PORT_UNREFERENCED_VARIABLE(rmVariantHal);
    PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx);

    pThis->__memmgrReconcileTunableState__ = &__nvoc_thunk_OBJENGSTATE_memmgrReconcileTunableState;

    pThis->__memmgrStateLoad__ = &__nvoc_thunk_OBJENGSTATE_memmgrStateLoad;

    pThis->__memmgrStateUnload__ = &__nvoc_thunk_OBJENGSTATE_memmgrStateUnload;

    pThis->__memmgrStateInitLocked__ = &__nvoc_thunk_OBJENGSTATE_memmgrStateInitLocked;

    pThis->__memmgrStatePreLoad__ = &__nvoc_thunk_OBJENGSTATE_memmgrStatePreLoad;

    pThis->__memmgrStatePostUnload__ = &__nvoc_thunk_OBJENGSTATE_memmgrStatePostUnload;

    pThis->__memmgrStateDestroy__ = &__nvoc_thunk_OBJENGSTATE_memmgrStateDestroy;

    pThis->__memmgrStatePreUnload__ = &__nvoc_thunk_OBJENGSTATE_memmgrStatePreUnload;

    pThis->__memmgrStateInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_memmgrStateInitUnlocked;

    pThis->__memmgrInitMissing__ = &__nvoc_thunk_OBJENGSTATE_memmgrInitMissing;

    pThis->__memmgrStatePreInitLocked__ = &__nvoc_thunk_OBJENGSTATE_memmgrStatePreInitLocked;

    pThis->__memmgrStatePreInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_memmgrStatePreInitUnlocked;

    pThis->__memmgrGetTunableState__ = &__nvoc_thunk_OBJENGSTATE_memmgrGetTunableState;

    pThis->__memmgrCompareTunableState__ = &__nvoc_thunk_OBJENGSTATE_memmgrCompareTunableState;

    pThis->__memmgrFreeTunableState__ = &__nvoc_thunk_OBJENGSTATE_memmgrFreeTunableState;

    pThis->__memmgrStatePostLoad__ = &__nvoc_thunk_OBJENGSTATE_memmgrStatePostLoad;

    pThis->__memmgrAllocTunableState__ = &__nvoc_thunk_OBJENGSTATE_memmgrAllocTunableState;

    pThis->__memmgrSetTunableState__ = &__nvoc_thunk_OBJENGSTATE_memmgrSetTunableState;

    pThis->__memmgrConstructEngine__ = &__nvoc_thunk_OBJENGSTATE_memmgrConstructEngine;

    pThis->__memmgrIsPresent__ = &__nvoc_thunk_OBJENGSTATE_memmgrIsPresent;
}

void __nvoc_init_funcTable_MemoryManager(MemoryManager *pThis, RmHalspecOwner *pRmhalspecowner) {
    __nvoc_init_funcTable_MemoryManager_1(pThis, pRmhalspecowner);
}

void __nvoc_init_OBJENGSTATE(OBJENGSTATE*);
void __nvoc_init_MemoryManager(MemoryManager *pThis, RmHalspecOwner *pRmhalspecowner) {
    pThis->__nvoc_pbase_MemoryManager = pThis;
    pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object;
    pThis->__nvoc_pbase_OBJENGSTATE = &pThis->__nvoc_base_OBJENGSTATE;
    __nvoc_init_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE);
    __nvoc_init_funcTable_MemoryManager(pThis, pRmhalspecowner);
}

NV_STATUS __nvoc_objCreate_MemoryManager(MemoryManager **ppThis, Dynamic *pParent, NvU32 createFlags) {
    NV_STATUS status;
    Object *pParentObj;
    MemoryManager *pThis;
    RmHalspecOwner *pRmhalspecowner;

    pThis = portMemAllocNonPaged(sizeof(MemoryManager));
    if (pThis == NULL) return NV_ERR_NO_MEMORY;

    portMemSet(pThis, 0, sizeof(MemoryManager));

    __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_MemoryManager);

    if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
    {
        pParentObj = dynamicCast(pParent, Object);
        objAddChild(pParentObj, &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object);
    }
    else
    {
        pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object.pParent = NULL;
    }

    if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL)
        pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent);
    NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT);

    __nvoc_init_MemoryManager(pThis, pRmhalspecowner);
    status = __nvoc_ctor_MemoryManager(pThis, pRmhalspecowner);
    if (status != NV_OK) goto __nvoc_objCreate_MemoryManager_cleanup;

    *ppThis = pThis;
    return NV_OK;

__nvoc_objCreate_MemoryManager_cleanup:
    // do not call destructors here since the constructor already called them
    portMemFree(pThis);
    return status;
}

NV_STATUS __nvoc_objCreateDynamic_MemoryManager(MemoryManager **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
    NV_STATUS status;

    status = __nvoc_objCreate_MemoryManager(ppThis, pParent, createFlags);

    return status;
}
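The __nvoc_thunk_OBJENGSTATE_* wrappers installed above all rely on the same pointer adjustment between an embedded base struct and the derived struct that contains it. A minimal standalone sketch of that mechanism, with illustrative names only (not part of the generated sources):

#include <stddef.h>
#include <stdio.h>

typedef struct Base { int baseField; } Base;

typedef struct Derived {
    int  derivedField;
    Base base;              /* embedded base, like OBJENGSTATE inside MemoryManager */
} Derived;

static void derivedMethod(Derived *pThis)
{
    printf("derivedField = %d\n", pThis->derivedField);
}

/* Thunk: recover the Derived* from the Base* by subtracting the base's
 * byte offset, exactly as the generated thunks subtract __nvoc_rtti_*.offset. */
static void thunk_baseMethod(Base *pBase)
{
    Derived *pThis = (Derived *)((unsigned char *)pBase - offsetof(Derived, base));
    derivedMethod(pThis);
}

int main(void)
{
    Derived d = { .derivedField = 42, .base = { .baseField = 0 } };
    thunk_baseMethod(&d.base);   /* prints: derivedField = 42 */
    return 0;
}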
@@ -1,312 +0,0 @@
#define NVOC_MEM_H_PRIVATE_ACCESS_ALLOWED
#include "nvoc/runtime.h"
#include "nvoc/rtti.h"
#include "nvtypes.h"
#include "nvport/nvport.h"
#include "nvport/inline/util_valist.h"
#include "utils/nvassert.h"
#include "g_mem_nvoc.h"

#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0x4789f2 = 1;
#endif

extern const struct NVOC_CLASS_DEF __nvoc_class_def_Memory;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource;

void __nvoc_init_Memory(Memory*);
void __nvoc_init_funcTable_Memory(Memory*);
NV_STATUS __nvoc_ctor_Memory(Memory*, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams);
void __nvoc_init_dataField_Memory(Memory*);
void __nvoc_dtor_Memory(Memory*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_Memory;

static const struct NVOC_RTTI __nvoc_rtti_Memory_Memory = {
    /*pClassDef=*/ &__nvoc_class_def_Memory,
    /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_Memory,
    /*offset=*/ 0,
};

static const struct NVOC_RTTI __nvoc_rtti_Memory_Object = {
    /*pClassDef=*/ &__nvoc_class_def_Object,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(Memory, __nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object),
};

static const struct NVOC_RTTI __nvoc_rtti_Memory_RsResource = {
    /*pClassDef=*/ &__nvoc_class_def_RsResource,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(Memory, __nvoc_base_RmResource.__nvoc_base_RsResource),
};

static const struct NVOC_RTTI __nvoc_rtti_Memory_RmResourceCommon = {
    /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(Memory, __nvoc_base_RmResource.__nvoc_base_RmResourceCommon),
};

static const struct NVOC_RTTI __nvoc_rtti_Memory_RmResource = {
    /*pClassDef=*/ &__nvoc_class_def_RmResource,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(Memory, __nvoc_base_RmResource),
};

static const struct NVOC_CASTINFO __nvoc_castinfo_Memory = {
    /*numRelatives=*/ 5,
    /*relatives=*/ {
        &__nvoc_rtti_Memory_Memory,
        &__nvoc_rtti_Memory_RmResource,
        &__nvoc_rtti_Memory_RmResourceCommon,
        &__nvoc_rtti_Memory_RsResource,
        &__nvoc_rtti_Memory_Object,
    },
};

const struct NVOC_CLASS_DEF __nvoc_class_def_Memory =
{
    /*classInfo=*/ {
        /*size=*/ sizeof(Memory),
        /*classId=*/ classId(Memory),
        /*providerId=*/ &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
        /*name=*/ "Memory",
#endif
    },
    /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_Memory,
    /*pCastInfo=*/ &__nvoc_castinfo_Memory,
    /*pExportInfo=*/ &__nvoc_export_info_Memory
};

static NV_STATUS __nvoc_thunk_Memory_resControl(struct RsResource *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return memControl((struct Memory *)(((unsigned char *)pMemory) - __nvoc_rtti_Memory_RsResource.offset), pCallContext, pParams);
}

static NV_STATUS __nvoc_thunk_Memory_resMap(struct RsResource *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) {
    return memMap((struct Memory *)(((unsigned char *)pMemory) - __nvoc_rtti_Memory_RsResource.offset), pCallContext, pParams, pCpuMapping);
}

static NV_STATUS __nvoc_thunk_Memory_resUnmap(struct RsResource *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) {
    return memUnmap((struct Memory *)(((unsigned char *)pMemory) - __nvoc_rtti_Memory_RsResource.offset), pCallContext, pCpuMapping);
}

static NV_STATUS __nvoc_thunk_Memory_rmresGetMemInterMapParams(struct RmResource *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
    return memGetMemInterMapParams((struct Memory *)(((unsigned char *)pMemory) - __nvoc_rtti_Memory_RmResource.offset), pParams);
}

static NV_STATUS __nvoc_thunk_Memory_rmresCheckMemInterUnmap(struct RmResource *pMemory, NvBool bSubdeviceHandleProvided) {
    return memCheckMemInterUnmap((struct Memory *)(((unsigned char *)pMemory) - __nvoc_rtti_Memory_RmResource.offset), bSubdeviceHandleProvided);
}

static NV_STATUS __nvoc_thunk_Memory_rmresGetMemoryMappingDescriptor(struct RmResource *pMemory, MEMORY_DESCRIPTOR **ppMemDesc) {
    return memGetMemoryMappingDescriptor((struct Memory *)(((unsigned char *)pMemory) - __nvoc_rtti_Memory_RmResource.offset), ppMemDesc);
}

static NvBool __nvoc_thunk_RmResource_memShareCallback(struct Memory *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
    return rmresShareCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_Memory_RmResource.offset), pInvokingClient, pParentRef, pSharePolicy);
}

static NvU32 __nvoc_thunk_RsResource_memGetRefCount(struct Memory *pResource) {
    return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Memory_RsResource.offset));
}

static NV_STATUS __nvoc_thunk_RsResource_memControlFilter(struct Memory *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Memory_RsResource.offset), pCallContext, pParams);
}

static void __nvoc_thunk_RsResource_memAddAdditionalDependants(struct RsClient *pClient, struct Memory *pResource, RsResourceRef *pReference) {
    resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Memory_RsResource.offset), pReference);
}

static NV_STATUS __nvoc_thunk_RmResource_memControl_Prologue(struct Memory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_Memory_RmResource.offset), pCallContext, pParams);
}

static NvBool __nvoc_thunk_RsResource_memCanCopy(struct Memory *pResource) {
    return resCanCopy((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Memory_RsResource.offset));
}

static NV_STATUS __nvoc_thunk_RsResource_memMapTo(struct Memory *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
    return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Memory_RsResource.offset), pParams);
}

static void __nvoc_thunk_RsResource_memPreDestruct(struct Memory *pResource) {
    resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Memory_RsResource.offset));
}

static NV_STATUS __nvoc_thunk_RsResource_memUnmapFrom(struct Memory *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
    return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Memory_RsResource.offset), pParams);
}

static void __nvoc_thunk_RmResource_memControl_Epilogue(struct Memory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_Memory_RmResource.offset), pCallContext, pParams);
}

static NV_STATUS __nvoc_thunk_RsResource_memControlLookup(struct Memory *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) {
    return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_Memory_RsResource.offset), pParams, ppEntry);
}

static NvBool __nvoc_thunk_RmResource_memAccessCallback(struct Memory *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
    return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_Memory_RmResource.offset), pInvokingClient, pAllocParams, accessRight);
}

const struct NVOC_EXPORT_INFO __nvoc_export_info_Memory =
{
    /*numEntries=*/ 0,
    /*pExportEntries=*/ 0
};

void __nvoc_dtor_RmResource(RmResource*);
void __nvoc_dtor_Memory(Memory *pThis) {
    __nvoc_memDestruct(pThis);
    __nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource);
    PORT_UNREFERENCED_VARIABLE(pThis);
}

void __nvoc_init_dataField_Memory(Memory *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);
}

NV_STATUS __nvoc_ctor_RmResource(RmResource* , CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
NV_STATUS __nvoc_ctor_Memory(Memory *pThis, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
    NV_STATUS status = NV_OK;
    status = __nvoc_ctor_RmResource(&pThis->__nvoc_base_RmResource, arg_pCallContext, arg_pParams);
    if (status != NV_OK) goto __nvoc_ctor_Memory_fail_RmResource;
    __nvoc_init_dataField_Memory(pThis);

    status = __nvoc_memConstruct(pThis, arg_pCallContext, arg_pParams);
    if (status != NV_OK) goto __nvoc_ctor_Memory_fail__init;
    goto __nvoc_ctor_Memory_exit; // Success

__nvoc_ctor_Memory_fail__init:
    __nvoc_dtor_RmResource(&pThis->__nvoc_base_RmResource);
__nvoc_ctor_Memory_fail_RmResource:
__nvoc_ctor_Memory_exit:

    return status;
}

static void __nvoc_init_funcTable_Memory_1(Memory *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);

    pThis->__memGetMapAddrSpace__ = &memGetMapAddrSpace_IMPL;

    pThis->__memControl__ = &memControl_IMPL;

    pThis->__memMap__ = &memMap_IMPL;

    pThis->__memUnmap__ = &memUnmap_IMPL;

    pThis->__memGetMemInterMapParams__ = &memGetMemInterMapParams_IMPL;

    pThis->__memCheckMemInterUnmap__ = &memCheckMemInterUnmap_ac1694;

    pThis->__memGetMemoryMappingDescriptor__ = &memGetMemoryMappingDescriptor_IMPL;

    pThis->__memCheckCopyPermissions__ = &memCheckCopyPermissions_ac1694;

    pThis->__memIsReady__ = &memIsReady_IMPL;

    pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__resControl__ = &__nvoc_thunk_Memory_resControl;

    pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__resMap__ = &__nvoc_thunk_Memory_resMap;

    pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__resUnmap__ = &__nvoc_thunk_Memory_resUnmap;

    pThis->__nvoc_base_RmResource.__rmresGetMemInterMapParams__ = &__nvoc_thunk_Memory_rmresGetMemInterMapParams;

    pThis->__nvoc_base_RmResource.__rmresCheckMemInterUnmap__ = &__nvoc_thunk_Memory_rmresCheckMemInterUnmap;

    pThis->__nvoc_base_RmResource.__rmresGetMemoryMappingDescriptor__ = &__nvoc_thunk_Memory_rmresGetMemoryMappingDescriptor;

    pThis->__memShareCallback__ = &__nvoc_thunk_RmResource_memShareCallback;

    pThis->__memGetRefCount__ = &__nvoc_thunk_RsResource_memGetRefCount;

    pThis->__memControlFilter__ = &__nvoc_thunk_RsResource_memControlFilter;

    pThis->__memAddAdditionalDependants__ = &__nvoc_thunk_RsResource_memAddAdditionalDependants;

    pThis->__memControl_Prologue__ = &__nvoc_thunk_RmResource_memControl_Prologue;

    pThis->__memCanCopy__ = &__nvoc_thunk_RsResource_memCanCopy;

    pThis->__memMapTo__ = &__nvoc_thunk_RsResource_memMapTo;

    pThis->__memPreDestruct__ = &__nvoc_thunk_RsResource_memPreDestruct;

    pThis->__memUnmapFrom__ = &__nvoc_thunk_RsResource_memUnmapFrom;

    pThis->__memControl_Epilogue__ = &__nvoc_thunk_RmResource_memControl_Epilogue;

    pThis->__memControlLookup__ = &__nvoc_thunk_RsResource_memControlLookup;

    pThis->__memAccessCallback__ = &__nvoc_thunk_RmResource_memAccessCallback;
}

void __nvoc_init_funcTable_Memory(Memory *pThis) {
    __nvoc_init_funcTable_Memory_1(pThis);
}

void __nvoc_init_RmResource(RmResource*);
void __nvoc_init_Memory(Memory *pThis) {
    pThis->__nvoc_pbase_Memory = pThis;
    pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object;
    pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource;
    pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_RmResource.__nvoc_base_RmResourceCommon;
    pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_RmResource;
    __nvoc_init_RmResource(&pThis->__nvoc_base_RmResource);
    __nvoc_init_funcTable_Memory(pThis);
}

NV_STATUS __nvoc_objCreate_Memory(Memory **ppThis, Dynamic *pParent, NvU32 createFlags, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
    NV_STATUS status;
    Object *pParentObj;
    Memory *pThis;

    pThis = portMemAllocNonPaged(sizeof(Memory));
    if (pThis == NULL) return NV_ERR_NO_MEMORY;

    portMemSet(pThis, 0, sizeof(Memory));

    __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_Memory);

    if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
    {
        pParentObj = dynamicCast(pParent, Object);
        objAddChild(pParentObj, &pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object);
    }
    else
    {
        pThis->__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL;
    }

    __nvoc_init_Memory(pThis);
    status = __nvoc_ctor_Memory(pThis, arg_pCallContext, arg_pParams);
    if (status != NV_OK) goto __nvoc_objCreate_Memory_cleanup;

    *ppThis = pThis;
    return NV_OK;

__nvoc_objCreate_Memory_cleanup:
    // do not call destructors here since the constructor already called them
    portMemFree(pThis);
    return status;
}

NV_STATUS __nvoc_objCreateDynamic_Memory(Memory **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
    NV_STATUS status;
    CALL_CONTEXT * arg_pCallContext = va_arg(args, CALL_CONTEXT *);
    struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *);

    status = __nvoc_objCreate_Memory(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams);

    return status;
}
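__nvoc_objCreateDynamic_Memory above shows the generic creation path pulling its per-class constructor arguments out of a va_list in declaration order. A self-contained sketch of that calling convention, with hypothetical names (not the driver's API):

#include <stdarg.h>
#include <stdio.h>

typedef int DemoStatus;

/* Concrete create function with typed extra arguments. */
static DemoStatus demoCreate(int id, const char *name)
{
    printf("id=%d name=%s\n", id, name);
    return 0;
}

/* Generic entry point: unpacks the extra arguments with va_arg,
 * mirroring the two va_arg calls in __nvoc_objCreateDynamic_Memory. */
static DemoStatus demoCreateDynamic(va_list args)
{
    int         id   = va_arg(args, int);
    const char *name = va_arg(args, const char *);
    return demoCreate(id, name);
}

static DemoStatus demoCreateVa(int first, ...)
{
    va_list args;
    va_start(args, first);
    DemoStatus status = demoCreateDynamic(args);
    va_end(args);
    return status;
}

int main(void)
{
    return demoCreateVa(0, 7, "memory");
}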
@@ -1,417 +0,0 @@
#ifndef _G_MEM_NVOC_H_
#define _G_MEM_NVOC_H_
#include "nvoc/runtime.h"

#ifdef __cplusplus
extern "C" {
#endif

/*
 * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "g_mem_nvoc.h"

#ifndef _MEMORY_API_H_
#define _MEMORY_API_H_

#include "core/core.h"
#include "resserv/rs_resource.h"
#include "rmapi/rmapi.h"
#include "rmapi/resource.h"

#include "containers/btree.h"

#include "ctrl/ctrl0041.h"

struct Device;

#ifndef __NVOC_CLASS_Device_TYPEDEF__
#define __NVOC_CLASS_Device_TYPEDEF__
typedef struct Device Device;
#endif /* __NVOC_CLASS_Device_TYPEDEF__ */

#ifndef __nvoc_class_id_Device
#define __nvoc_class_id_Device 0xe0ac20
#endif /* __nvoc_class_id_Device */


struct Subdevice;

#ifndef __NVOC_CLASS_Subdevice_TYPEDEF__
#define __NVOC_CLASS_Subdevice_TYPEDEF__
typedef struct Subdevice Subdevice;
#endif /* __NVOC_CLASS_Subdevice_TYPEDEF__ */

#ifndef __nvoc_class_id_Subdevice
#define __nvoc_class_id_Subdevice 0x4b01b3
#endif /* __nvoc_class_id_Subdevice */


struct RsClient;

#ifndef __NVOC_CLASS_RsClient_TYPEDEF__
#define __NVOC_CLASS_RsClient_TYPEDEF__
typedef struct RsClient RsClient;
#endif /* __NVOC_CLASS_RsClient_TYPEDEF__ */

#ifndef __nvoc_class_id_RsClient
#define __nvoc_class_id_RsClient 0x8f87e5
#endif /* __nvoc_class_id_RsClient */


struct Heap;

#ifndef __NVOC_CLASS_Heap_TYPEDEF__
#define __NVOC_CLASS_Heap_TYPEDEF__
typedef struct Heap Heap;
#endif /* __NVOC_CLASS_Heap_TYPEDEF__ */

#ifndef __nvoc_class_id_Heap
#define __nvoc_class_id_Heap 0x556e9a
#endif /* __nvoc_class_id_Heap */


struct OBJGPU;

#ifndef __NVOC_CLASS_OBJGPU_TYPEDEF__
#define __NVOC_CLASS_OBJGPU_TYPEDEF__
typedef struct OBJGPU OBJGPU;
#endif /* __NVOC_CLASS_OBJGPU_TYPEDEF__ */

#ifndef __nvoc_class_id_OBJGPU
#define __nvoc_class_id_OBJGPU 0x7ef3cb
#endif /* __nvoc_class_id_OBJGPU */


typedef struct MEMORY_DESCRIPTOR MEMORY_DESCRIPTOR;
typedef struct PmuMapping PmuMapping;
typedef struct HWRESOURCE_INFO HWRESOURCE_INFO;

//
// vGPU non-stall interrupt info
//
typedef struct _def_client_vgpu_ns_intr
{
    NvU32   nsSemValue;                 // Non-stall interrupt semaphore value
    NvU32   nsSemOffset;                // Non-stall interrupt semaphore offset. Currently it is always 0.
    NvBool  isSemaMemValidationEnabled; // Enable change in non-stall interrupt sema value check
                                        // while generating event
    NvU64   guestDomainId;              // Guest ID that we need to use to inject interrupt
    NvU64   guestMSIAddr;               // MSI address allocated by guest OS
    NvU32   guestMSIData;               // MSI data value set by guest OS
    void   *pVgpuVfioRef;               // Reference to vgpu device in nvidia-vgpu-vfio module
    void   *pVmBusHostChannel;          // VmBus host channel used to communicate the event with the guest
    void   *pEventDpc;                  // DPC event to pass the interrupt
} VGPU_NS_INTR;

typedef struct
{
    struct Memory *pNext;
    struct Memory *pPrev;
} memCircularListItem;

/*!
 * RM internal class representing NV01_MEMORY_XXX
 *
 * @note Memory cannot be a GpuResource because NoDeviceMemory
 *       subclass is not allocated under a device.
 */
#ifdef NVOC_MEM_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
struct Memory {
    const struct NVOC_RTTI *__nvoc_rtti;
    struct RmResource __nvoc_base_RmResource;
    struct Object *__nvoc_pbase_Object;
    struct RsResource *__nvoc_pbase_RsResource;
    struct RmResourceCommon *__nvoc_pbase_RmResourceCommon;
    struct RmResource *__nvoc_pbase_RmResource;
    struct Memory *__nvoc_pbase_Memory;
    NV_STATUS (*__memGetMapAddrSpace__)(struct Memory *, CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *);
    NV_STATUS (*__memControl__)(struct Memory *, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
    NV_STATUS (*__memMap__)(struct Memory *, CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, RsCpuMapping *);
    NV_STATUS (*__memUnmap__)(struct Memory *, CALL_CONTEXT *, RsCpuMapping *);
    NV_STATUS (*__memGetMemInterMapParams__)(struct Memory *, RMRES_MEM_INTER_MAP_PARAMS *);
    NV_STATUS (*__memCheckMemInterUnmap__)(struct Memory *, NvBool);
    NV_STATUS (*__memGetMemoryMappingDescriptor__)(struct Memory *, MEMORY_DESCRIPTOR **);
    NV_STATUS (*__memCheckCopyPermissions__)(struct Memory *, struct OBJGPU *, NvHandle);
    NV_STATUS (*__memIsReady__)(struct Memory *);
    NvBool (*__memShareCallback__)(struct Memory *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *);
    NvU32 (*__memGetRefCount__)(struct Memory *);
    NV_STATUS (*__memControlFilter__)(struct Memory *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
    void (*__memAddAdditionalDependants__)(struct RsClient *, struct Memory *, RsResourceRef *);
    NV_STATUS (*__memControl_Prologue__)(struct Memory *, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
    NvBool (*__memCanCopy__)(struct Memory *);
    NV_STATUS (*__memMapTo__)(struct Memory *, RS_RES_MAP_TO_PARAMS *);
    void (*__memPreDestruct__)(struct Memory *);
    NV_STATUS (*__memUnmapFrom__)(struct Memory *, RS_RES_UNMAP_FROM_PARAMS *);
    void (*__memControl_Epilogue__)(struct Memory *, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
    NV_STATUS (*__memControlLookup__)(struct Memory *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **);
    NvBool (*__memAccessCallback__)(struct Memory *, struct RsClient *, void *, RsAccessRight);
    NvBool bConstructed;
    struct Device *pDevice;
    struct Subdevice *pSubDevice;
    struct OBJGPU *pGpu;
    NvBool bBcResource;
    NvU32 categoryClassId;
    NvU64 Length;
    NvU32 HeapOwner;
    NvU32 RefCount;
    struct Heap *pHeap;
    MEMORY_DESCRIPTOR *pMemDesc;
    NvBool isMemDescOwner;
    memCircularListItem dupListItem;
    NvP64 KernelVAddr;
    NvP64 KernelMapPriv;
    PmuMapping *pPmuMappingList;
    NODE Node;
    NvU32 Attr;
    NvU32 Attr2;
    NvU32 Pitch;
    NvU32 Type;
    NvU32 Flags;
    NvU32 tag;
    NvU64 osDeviceHandle;
    HWRESOURCE_INFO *pHwResource;
    NvBool bRpcAlloc;
    VGPU_NS_INTR vgpuNsIntr;
};

#ifndef __NVOC_CLASS_Memory_TYPEDEF__
#define __NVOC_CLASS_Memory_TYPEDEF__
typedef struct Memory Memory;
#endif /* __NVOC_CLASS_Memory_TYPEDEF__ */

#ifndef __nvoc_class_id_Memory
#define __nvoc_class_id_Memory 0x4789f2
#endif /* __nvoc_class_id_Memory */

extern const struct NVOC_CLASS_DEF __nvoc_class_def_Memory;

#define __staticCast_Memory(pThis) \
    ((pThis)->__nvoc_pbase_Memory)

#ifdef __nvoc_mem_h_disabled
#define __dynamicCast_Memory(pThis) ((Memory*)NULL)
#else //__nvoc_mem_h_disabled
#define __dynamicCast_Memory(pThis) \
    ((Memory*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(Memory)))
#endif //__nvoc_mem_h_disabled


NV_STATUS __nvoc_objCreateDynamic_Memory(Memory**, Dynamic*, NvU32, va_list);

NV_STATUS __nvoc_objCreate_Memory(Memory**, Dynamic*, NvU32, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams);
#define __objCreate_Memory(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \
    __nvoc_objCreate_Memory((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams)

#define memGetMapAddrSpace(pMemory, pCallContext, mapFlags, pAddrSpace) memGetMapAddrSpace_DISPATCH(pMemory, pCallContext, mapFlags, pAddrSpace)
#define memControl(pMemory, pCallContext, pParams) memControl_DISPATCH(pMemory, pCallContext, pParams)
#define memMap(pMemory, pCallContext, pParams, pCpuMapping) memMap_DISPATCH(pMemory, pCallContext, pParams, pCpuMapping)
#define memUnmap(pMemory, pCallContext, pCpuMapping) memUnmap_DISPATCH(pMemory, pCallContext, pCpuMapping)
#define memGetMemInterMapParams(pMemory, pParams) memGetMemInterMapParams_DISPATCH(pMemory, pParams)
#define memCheckMemInterUnmap(pMemory, bSubdeviceHandleProvided) memCheckMemInterUnmap_DISPATCH(pMemory, bSubdeviceHandleProvided)
#define memGetMemoryMappingDescriptor(pMemory, ppMemDesc) memGetMemoryMappingDescriptor_DISPATCH(pMemory, ppMemDesc)
#define memCheckCopyPermissions(pMemory, pDstGpu, hDstClientNvBool) memCheckCopyPermissions_DISPATCH(pMemory, pDstGpu, hDstClientNvBool)
#define memIsReady(pMemory) memIsReady_DISPATCH(pMemory)
#define memShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) memShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy)
#define memGetRefCount(pResource) memGetRefCount_DISPATCH(pResource)
#define memControlFilter(pResource, pCallContext, pParams) memControlFilter_DISPATCH(pResource, pCallContext, pParams)
#define memAddAdditionalDependants(pClient, pResource, pReference) memAddAdditionalDependants_DISPATCH(pClient, pResource, pReference)
#define memControl_Prologue(pResource, pCallContext, pParams) memControl_Prologue_DISPATCH(pResource, pCallContext, pParams)
#define memCanCopy(pResource) memCanCopy_DISPATCH(pResource)
#define memMapTo(pResource, pParams) memMapTo_DISPATCH(pResource, pParams)
#define memPreDestruct(pResource) memPreDestruct_DISPATCH(pResource)
#define memUnmapFrom(pResource, pParams) memUnmapFrom_DISPATCH(pResource, pParams)
#define memControl_Epilogue(pResource, pCallContext, pParams) memControl_Epilogue_DISPATCH(pResource, pCallContext, pParams)
#define memControlLookup(pResource, pParams, ppEntry) memControlLookup_DISPATCH(pResource, pParams, ppEntry)
#define memAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) memAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight)
NV_STATUS memGetMapAddrSpace_IMPL(struct Memory *pMemory, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace);

static inline NV_STATUS memGetMapAddrSpace_DISPATCH(struct Memory *pMemory, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) {
    return pMemory->__memGetMapAddrSpace__(pMemory, pCallContext, mapFlags, pAddrSpace);
}

NV_STATUS memControl_IMPL(struct Memory *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams);

static inline NV_STATUS memControl_DISPATCH(struct Memory *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return pMemory->__memControl__(pMemory, pCallContext, pParams);
}

NV_STATUS memMap_IMPL(struct Memory *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping);

static inline NV_STATUS memMap_DISPATCH(struct Memory *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) {
    return pMemory->__memMap__(pMemory, pCallContext, pParams, pCpuMapping);
}

NV_STATUS memUnmap_IMPL(struct Memory *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping);

static inline NV_STATUS memUnmap_DISPATCH(struct Memory *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) {
    return pMemory->__memUnmap__(pMemory, pCallContext, pCpuMapping);
}

NV_STATUS memGetMemInterMapParams_IMPL(struct Memory *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams);

static inline NV_STATUS memGetMemInterMapParams_DISPATCH(struct Memory *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
    return pMemory->__memGetMemInterMapParams__(pMemory, pParams);
}

static inline NV_STATUS memCheckMemInterUnmap_ac1694(struct Memory *pMemory, NvBool bSubdeviceHandleProvided) {
    return NV_OK;
}

static inline NV_STATUS memCheckMemInterUnmap_DISPATCH(struct Memory *pMemory, NvBool bSubdeviceHandleProvided) {
    return pMemory->__memCheckMemInterUnmap__(pMemory, bSubdeviceHandleProvided);
}

NV_STATUS memGetMemoryMappingDescriptor_IMPL(struct Memory *pMemory, MEMORY_DESCRIPTOR **ppMemDesc);

static inline NV_STATUS memGetMemoryMappingDescriptor_DISPATCH(struct Memory *pMemory, MEMORY_DESCRIPTOR **ppMemDesc) {
    return pMemory->__memGetMemoryMappingDescriptor__(pMemory, ppMemDesc);
}

static inline NV_STATUS memCheckCopyPermissions_ac1694(struct Memory *pMemory, struct OBJGPU *pDstGpu, NvHandle hDstClientNvBool) {
    return NV_OK;
}

static inline NV_STATUS memCheckCopyPermissions_DISPATCH(struct Memory *pMemory, struct OBJGPU *pDstGpu, NvHandle hDstClientNvBool) {
    return pMemory->__memCheckCopyPermissions__(pMemory, pDstGpu, hDstClientNvBool);
}

NV_STATUS memIsReady_IMPL(struct Memory *pMemory);

static inline NV_STATUS memIsReady_DISPATCH(struct Memory *pMemory) {
    return pMemory->__memIsReady__(pMemory);
}

static inline NvBool memShareCallback_DISPATCH(struct Memory *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
    return pResource->__memShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy);
}

static inline NvU32 memGetRefCount_DISPATCH(struct Memory *pResource) {
    return pResource->__memGetRefCount__(pResource);
}

static inline NV_STATUS memControlFilter_DISPATCH(struct Memory *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return pResource->__memControlFilter__(pResource, pCallContext, pParams);
}

static inline void memAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct Memory *pResource, RsResourceRef *pReference) {
    pResource->__memAddAdditionalDependants__(pClient, pResource, pReference);
}

static inline NV_STATUS memControl_Prologue_DISPATCH(struct Memory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return pResource->__memControl_Prologue__(pResource, pCallContext, pParams);
}

static inline NvBool memCanCopy_DISPATCH(struct Memory *pResource) {
    return pResource->__memCanCopy__(pResource);
}

static inline NV_STATUS memMapTo_DISPATCH(struct Memory *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
    return pResource->__memMapTo__(pResource, pParams);
}

static inline void memPreDestruct_DISPATCH(struct Memory *pResource) {
    pResource->__memPreDestruct__(pResource);
}

static inline NV_STATUS memUnmapFrom_DISPATCH(struct Memory *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
    return pResource->__memUnmapFrom__(pResource, pParams);
}

static inline void memControl_Epilogue_DISPATCH(struct Memory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    pResource->__memControl_Epilogue__(pResource, pCallContext, pParams);
}

static inline NV_STATUS memControlLookup_DISPATCH(struct Memory *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) {
    return pResource->__memControlLookup__(pResource, pParams, ppEntry);
}

static inline NvBool memAccessCallback_DISPATCH(struct Memory *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
    return pResource->__memAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight);
}

NV_STATUS memConstruct_IMPL(struct Memory *arg_pMemory, CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams);
#define __nvoc_memConstruct(arg_pMemory, arg_pCallContext, arg_pParams) memConstruct_IMPL(arg_pMemory, arg_pCallContext, arg_pParams)
NV_STATUS memCopyConstruct_IMPL(struct Memory *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *pParams);
#ifdef __nvoc_mem_h_disabled
static inline NV_STATUS memCopyConstruct(struct Memory *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *pParams) {
    NV_ASSERT_FAILED_PRECOMP("Memory was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_mem_h_disabled
#define memCopyConstruct(pMemory, pCallContext, pParams) memCopyConstruct_IMPL(pMemory, pCallContext, pParams)
#endif //__nvoc_mem_h_disabled

void memDestruct_IMPL(struct Memory *pMemory);
#define __nvoc_memDestruct(pMemory) memDestruct_IMPL(pMemory)
NV_STATUS memConstructCommon_IMPL(struct Memory *pMemory, NvU32 categoryClassId, NvU32 flags, MEMORY_DESCRIPTOR *pMemDesc, NvU32 heapOwner, struct Heap *pHeap, NvU32 attr, NvU32 attr2, NvU32 Pitch, NvU32 type, NvU32 tag, HWRESOURCE_INFO *pHwResource);
#ifdef __nvoc_mem_h_disabled
static inline NV_STATUS memConstructCommon(struct Memory *pMemory, NvU32 categoryClassId, NvU32 flags, MEMORY_DESCRIPTOR *pMemDesc, NvU32 heapOwner, struct Heap *pHeap, NvU32 attr, NvU32 attr2, NvU32 Pitch, NvU32 type, NvU32 tag, HWRESOURCE_INFO *pHwResource) {
    NV_ASSERT_FAILED_PRECOMP("Memory was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_mem_h_disabled
#define memConstructCommon(pMemory, categoryClassId, flags, pMemDesc, heapOwner, pHeap, attr, attr2, Pitch, type, tag, pHwResource) memConstructCommon_IMPL(pMemory, categoryClassId, flags, pMemDesc, heapOwner, pHeap, attr, attr2, Pitch, type, tag, pHwResource)
#endif //__nvoc_mem_h_disabled

void memDestructCommon_IMPL(struct Memory *pMemory);
#ifdef __nvoc_mem_h_disabled
static inline void memDestructCommon(struct Memory *pMemory) {
    NV_ASSERT_FAILED_PRECOMP("Memory was disabled!");
}
#else //__nvoc_mem_h_disabled
#define memDestructCommon(pMemory) memDestructCommon_IMPL(pMemory)
#endif //__nvoc_mem_h_disabled

NV_STATUS memCreateMemDesc_IMPL(struct OBJGPU *pGpu, MEMORY_DESCRIPTOR **ppMemDesc, NV_ADDRESS_SPACE addrSpace, NvU64 FBOffset, NvU64 length, NvU32 attr, NvU32 attr2);
#define memCreateMemDesc(pGpu, ppMemDesc, addrSpace, FBOffset, length, attr, attr2) memCreateMemDesc_IMPL(pGpu, ppMemDesc, addrSpace, FBOffset, length, attr, attr2)
NV_STATUS memCreateKernelMapping_IMPL(struct Memory *pMemory, NvU32 Protect, NvBool bClear);
#ifdef __nvoc_mem_h_disabled
static inline NV_STATUS memCreateKernelMapping(struct Memory *pMemory, NvU32 Protect, NvBool bClear) {
    NV_ASSERT_FAILED_PRECOMP("Memory was disabled!");
    return NV_ERR_NOT_SUPPORTED;
}
#else //__nvoc_mem_h_disabled
#define memCreateKernelMapping(pMemory, Protect, bClear) memCreateKernelMapping_IMPL(pMemory, Protect, bClear)
#endif //__nvoc_mem_h_disabled

NV_STATUS memGetByHandle_IMPL(struct RsClient *pClient, NvHandle hMemory, struct Memory **ppMemory);
#define memGetByHandle(pClient, hMemory, ppMemory) memGetByHandle_IMPL(pClient, hMemory, ppMemory)
NV_STATUS memGetByHandleAndDevice_IMPL(struct RsClient *pClient, NvHandle hMemory, NvHandle hDevice, struct Memory **ppMemory);
#define memGetByHandleAndDevice(pClient, hMemory, hDevice, ppMemory) memGetByHandleAndDevice_IMPL(pClient, hMemory, hDevice, ppMemory)
NV_STATUS memGetByHandleAndGroupedGpu_IMPL(struct RsClient *pClient, NvHandle hMemory, struct OBJGPU *pGpu, struct Memory **ppMemory);
#define memGetByHandleAndGroupedGpu(pClient, hMemory, pGpu, ppMemory) memGetByHandleAndGroupedGpu_IMPL(pClient, hMemory, pGpu, ppMemory)
#undef PRIVATE_FIELD


#endif


#ifdef __cplusplus
} // extern "C"
#endif
#endif // _G_MEM_NVOC_H_
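Every *_DISPATCH inline in the header above indirects through a per-object function pointer that the generated __nvoc_init_funcTable_Memory_1 filled in; this is how NVOC gets virtual-method behavior in plain C. A reduced sketch of the pattern with illustrative types (not the driver's API):

#include <stdio.h>

typedef struct Widget Widget;

struct Widget {
    /* per-object "vtable" slot, analogous to __memIsReady__ */
    int (*__widgetIsReady__)(Widget *);
    int ready;
};

/* Default implementation, analogous to memIsReady_IMPL. */
static int widgetIsReady_IMPL(Widget *pWidget)
{
    return pWidget->ready;
}

/* Dispatch inline, analogous to memIsReady_DISPATCH. */
static inline int widgetIsReady_DISPATCH(Widget *pWidget)
{
    return pWidget->__widgetIsReady__(pWidget);
}

/* Generated-style init: point the slot at the implementation.
 * A subclass could overwrite the slot with its own function. */
static void widgetInitFuncTable(Widget *pWidget)
{
    pWidget->__widgetIsReady__ = &widgetIsReady_IMPL;
}

int main(void)
{
    Widget w = { 0 };
    widgetInitFuncTable(&w);
    w.ready = 1;
    printf("ready=%d\n", widgetIsReady_DISPATCH(&w));  /* prints: ready=1 */
    return 0;
}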
File diff suppressed because it is too large
@@ -1,130 +0,0 @@
#define NVOC_OBJECT_H_PRIVATE_ACCESS_ALLOWED
#include "nvoc/runtime.h"
#include "nvoc/rtti.h"
#include "nvtypes.h"
#include "nvport/nvport.h"
#include "nvport/inline/util_valist.h"
#include "utils/nvassert.h"
#include "g_object_nvoc.h"

#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0x497031 = 1;
#endif

extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;

void __nvoc_init_Object(Object*);
void __nvoc_init_funcTable_Object(Object*);
NV_STATUS __nvoc_ctor_Object(Object*);
void __nvoc_init_dataField_Object(Object*);
void __nvoc_dtor_Object(Object*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_Object;

static const struct NVOC_RTTI __nvoc_rtti_Object_Object = {
    /*pClassDef=*/ &__nvoc_class_def_Object,
    /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_Object,
    /*offset=*/ 0,
};

static const struct NVOC_CASTINFO __nvoc_castinfo_Object = {
    /*numRelatives=*/ 1,
    /*relatives=*/ {
        &__nvoc_rtti_Object_Object,
    },
};

const struct NVOC_CLASS_DEF __nvoc_class_def_Object =
{
    /*classInfo=*/ {
        /*size=*/ sizeof(Object),
        /*classId=*/ classId(Object),
        /*providerId=*/ &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
        /*name=*/ "Object",
#endif
    },
    /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_Object,
    /*pCastInfo=*/ &__nvoc_castinfo_Object,
    /*pExportInfo=*/ &__nvoc_export_info_Object
};

const struct NVOC_EXPORT_INFO __nvoc_export_info_Object =
{
    /*numEntries=*/ 0,
    /*pExportEntries=*/ 0
};

void __nvoc_dtor_Object(Object *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);
}

void __nvoc_init_dataField_Object(Object *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);
}

NV_STATUS __nvoc_ctor_Object(Object *pThis) {
    NV_STATUS status = NV_OK;
    __nvoc_init_dataField_Object(pThis);
    goto __nvoc_ctor_Object_exit; // Success

__nvoc_ctor_Object_exit:

    return status;
}

static void __nvoc_init_funcTable_Object_1(Object *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);
}

void __nvoc_init_funcTable_Object(Object *pThis) {
    __nvoc_init_funcTable_Object_1(pThis);
}

void __nvoc_init_Object(Object *pThis) {
    pThis->__nvoc_pbase_Object = pThis;
    __nvoc_init_funcTable_Object(pThis);
}

NV_STATUS __nvoc_objCreate_Object(Object **ppThis, Dynamic *pParent, NvU32 createFlags) {
    NV_STATUS status;
    Object *pParentObj;
    Object *pThis;

    pThis = portMemAllocNonPaged(sizeof(Object));
    if (pThis == NULL) return NV_ERR_NO_MEMORY;

    portMemSet(pThis, 0, sizeof(Object));

    __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_Object);

    if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
    {
        pParentObj = dynamicCast(pParent, Object);
        objAddChild(pParentObj, pThis);
    }
    else
    {
        pThis->pParent = NULL;
    }

    __nvoc_init_Object(pThis);
    status = __nvoc_ctor_Object(pThis);
    if (status != NV_OK) goto __nvoc_objCreate_Object_cleanup;

    *ppThis = pThis;
    return NV_OK;

__nvoc_objCreate_Object_cleanup:
    // do not call destructors here since the constructor already called them
    portMemFree(pThis);
    return status;
}

NV_STATUS __nvoc_objCreateDynamic_Object(Object **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
    NV_STATUS status;

    status = __nvoc_objCreate_Object(ppThis, pParent, createFlags);

    return status;
}
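The creation routines in these generated files all follow the same allocate, zero, init, construct, cleanup-on-failure shape. A compact sketch of that goto-based error-handling idiom with generic names (not the RM API):

#include <stdlib.h>
#include <string.h>

typedef struct Thing { int value; } Thing;

static int thingCtor(Thing *pThis)
{
    pThis->value = 1;
    return 0;                     /* 0 == success, like NV_OK */
}

static int thingCreate(Thing **ppThis)
{
    int status;
    Thing *pThis = malloc(sizeof(*pThis));
    if (pThis == NULL) return -1; /* like NV_ERR_NO_MEMORY */

    memset(pThis, 0, sizeof(*pThis));

    status = thingCtor(pThis);
    if (status != 0) goto cleanup;

    *ppThis = pThis;
    return 0;

cleanup:
    /* the constructor already unwound its own partial state on failure,
     * so only the raw allocation is released here */
    free(pThis);
    return status;
}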
@@ -1,357 +0,0 @@
|
||||
#define NVOC_OBJTMR_H_PRIVATE_ACCESS_ALLOWED
|
||||
#include "nvoc/runtime.h"
|
||||
#include "nvoc/rtti.h"
|
||||
#include "nvtypes.h"
|
||||
#include "nvport/nvport.h"
|
||||
#include "nvport/inline/util_valist.h"
|
||||
#include "utils/nvassert.h"
|
||||
#include "g_objtmr_nvoc.h"
|
||||
|
||||
#ifdef DEBUG
|
||||
char __nvoc_class_id_uniqueness_check_0x9ddede = 1;
|
||||
#endif
|
||||
|
||||
extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJTMR;
|
||||
|
||||
extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;
|
||||
|
||||
extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJENGSTATE;
|
||||
|
||||
void __nvoc_init_OBJTMR(OBJTMR*, RmHalspecOwner* );
|
||||
void __nvoc_init_funcTable_OBJTMR(OBJTMR*, RmHalspecOwner* );
|
||||
NV_STATUS __nvoc_ctor_OBJTMR(OBJTMR*, RmHalspecOwner* );
|
||||
void __nvoc_init_dataField_OBJTMR(OBJTMR*, RmHalspecOwner* );
|
||||
void __nvoc_dtor_OBJTMR(OBJTMR*);
|
||||
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJTMR;
|
||||
|
||||
static const struct NVOC_RTTI __nvoc_rtti_OBJTMR_OBJTMR = {
|
||||
/*pClassDef=*/ &__nvoc_class_def_OBJTMR,
|
||||
/*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJTMR,
|
||||
/*offset=*/ 0,
|
||||
};
|
||||
|
||||
static const struct NVOC_RTTI __nvoc_rtti_OBJTMR_Object = {
|
||||
/*pClassDef=*/ &__nvoc_class_def_Object,
|
||||
/*dtor=*/ &__nvoc_destructFromBase,
|
||||
/*offset=*/ NV_OFFSETOF(OBJTMR, __nvoc_base_OBJENGSTATE.__nvoc_base_Object),
|
||||
};
|
||||
|
||||
static const struct NVOC_RTTI __nvoc_rtti_OBJTMR_OBJENGSTATE = {
|
||||
/*pClassDef=*/ &__nvoc_class_def_OBJENGSTATE,
|
||||
/*dtor=*/ &__nvoc_destructFromBase,
|
||||
/*offset=*/ NV_OFFSETOF(OBJTMR, __nvoc_base_OBJENGSTATE),
|
||||
};
|
||||
|
||||
static const struct NVOC_CASTINFO __nvoc_castinfo_OBJTMR = {
|
||||
/*numRelatives=*/ 3,
|
||||
/*relatives=*/ {
|
||||
&__nvoc_rtti_OBJTMR_OBJTMR,
|
||||
&__nvoc_rtti_OBJTMR_OBJENGSTATE,
|
||||
&__nvoc_rtti_OBJTMR_Object,
|
||||
},
|
||||
};
|
||||
|
||||
const struct NVOC_CLASS_DEF __nvoc_class_def_OBJTMR =
|
||||
{
|
||||
/*classInfo=*/ {
|
||||
/*size=*/ sizeof(OBJTMR),
|
||||
/*classId=*/ classId(OBJTMR),
|
||||
/*providerId=*/ &__nvoc_rtti_provider,
|
||||
#if NV_PRINTF_STRINGS_ALLOWED
|
||||
/*name=*/ "OBJTMR",
|
||||
#endif
|
||||
},
|
||||
/*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_OBJTMR,
|
||||
/*pCastInfo=*/ &__nvoc_castinfo_OBJTMR,
|
||||
/*pExportInfo=*/ &__nvoc_export_info_OBJTMR
|
||||
};
|
||||
|
||||
static NV_STATUS __nvoc_thunk_OBJTMR_engstateConstructEngine(struct OBJGPU *pGpu, struct OBJENGSTATE *pTmr, ENGDESCRIPTOR arg0) {
|
||||
return tmrConstructEngine(pGpu, (struct OBJTMR *)(((unsigned char *)pTmr) - __nvoc_rtti_OBJTMR_OBJENGSTATE.offset), arg0);
|
||||
}
|
||||
|
||||
static NV_STATUS __nvoc_thunk_OBJTMR_engstateStateInitLocked(struct OBJGPU *pGpu, struct OBJENGSTATE *pTmr) {
|
||||
return tmrStateInitLocked(pGpu, (struct OBJTMR *)(((unsigned char *)pTmr) - __nvoc_rtti_OBJTMR_OBJENGSTATE.offset));
|
||||
}
|
||||
|
||||
static NV_STATUS __nvoc_thunk_OBJTMR_engstateStateLoad(struct OBJGPU *pGpu, struct OBJENGSTATE *pTmr, NvU32 arg0) {
|
||||
return tmrStateLoad(pGpu, (struct OBJTMR *)(((unsigned char *)pTmr) - __nvoc_rtti_OBJTMR_OBJENGSTATE.offset), arg0);
|
||||
}
|
||||
|
||||
static NV_STATUS __nvoc_thunk_OBJTMR_engstateStateUnload(struct OBJGPU *pGpu, struct OBJENGSTATE *pTmr, NvU32 arg0) {
|
||||
return tmrStateUnload(pGpu, (struct OBJTMR *)(((unsigned char *)pTmr) - __nvoc_rtti_OBJTMR_OBJENGSTATE.offset), arg0);
|
||||
}
|
||||
|
||||
static void __nvoc_thunk_OBJTMR_engstateStateDestroy(struct OBJGPU *pGpu, struct OBJENGSTATE *pTmr) {
|
||||
tmrStateDestroy(pGpu, (struct OBJTMR *)(((unsigned char *)pTmr) - __nvoc_rtti_OBJTMR_OBJENGSTATE.offset));
|
||||
}
|
||||
|
||||
static NV_STATUS __nvoc_thunk_OBJENGSTATE_tmrReconcileTunableState(POBJGPU pGpu, struct OBJTMR *pEngstate, void *pTunableState) {
|
||||
return engstateReconcileTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJTMR_OBJENGSTATE.offset), pTunableState);
|
||||
}
|
||||
|
||||
static NV_STATUS __nvoc_thunk_OBJENGSTATE_tmrStatePreLoad(POBJGPU pGpu, struct OBJTMR *pEngstate, NvU32 arg0) {
|
||||
return engstateStatePreLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJTMR_OBJENGSTATE.offset), arg0);
|
||||
}
|
||||
|
||||
static NV_STATUS __nvoc_thunk_OBJENGSTATE_tmrStatePostUnload(POBJGPU pGpu, struct OBJTMR *pEngstate, NvU32 arg0) {
|
||||
return engstateStatePostUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJTMR_OBJENGSTATE.offset), arg0);
|
||||
}
|
||||
|
||||
static NV_STATUS __nvoc_thunk_OBJENGSTATE_tmrStatePreUnload(POBJGPU pGpu, struct OBJTMR *pEngstate, NvU32 arg0) {
|
||||
return engstateStatePreUnload(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJTMR_OBJENGSTATE.offset), arg0);
|
||||
}
|
||||
|
||||
static NV_STATUS __nvoc_thunk_OBJENGSTATE_tmrStateInitUnlocked(POBJGPU pGpu, struct OBJTMR *pEngstate) {
|
||||
return engstateStateInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJTMR_OBJENGSTATE.offset));
|
||||
}
|
||||
|
||||
static void __nvoc_thunk_OBJENGSTATE_tmrInitMissing(POBJGPU pGpu, struct OBJTMR *pEngstate) {
|
||||
engstateInitMissing(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJTMR_OBJENGSTATE.offset));
|
||||
}
|
||||
|
||||
static NV_STATUS __nvoc_thunk_OBJENGSTATE_tmrStatePreInitLocked(POBJGPU pGpu, struct OBJTMR *pEngstate) {
|
||||
return engstateStatePreInitLocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJTMR_OBJENGSTATE.offset));
|
||||
}
|
||||
|
||||
static NV_STATUS __nvoc_thunk_OBJENGSTATE_tmrStatePreInitUnlocked(POBJGPU pGpu, struct OBJTMR *pEngstate) {
|
||||
return engstateStatePreInitUnlocked(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJTMR_OBJENGSTATE.offset));
|
||||
}
|
||||
|
||||
static NV_STATUS __nvoc_thunk_OBJENGSTATE_tmrGetTunableState(POBJGPU pGpu, struct OBJTMR *pEngstate, void *pTunableState) {
|
||||
return engstateGetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJTMR_OBJENGSTATE.offset), pTunableState);
|
||||
}
|
||||
|
||||
static NV_STATUS __nvoc_thunk_OBJENGSTATE_tmrCompareTunableState(POBJGPU pGpu, struct OBJTMR *pEngstate, void *pTunables1, void *pTunables2) {
|
||||
return engstateCompareTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJTMR_OBJENGSTATE.offset), pTunables1, pTunables2);
|
||||
}
|
||||
|
||||
static void __nvoc_thunk_OBJENGSTATE_tmrFreeTunableState(POBJGPU pGpu, struct OBJTMR *pEngstate, void *pTunableState) {
|
||||
engstateFreeTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJTMR_OBJENGSTATE.offset), pTunableState);
|
||||
}
|
||||
|
||||
static NV_STATUS __nvoc_thunk_OBJENGSTATE_tmrStatePostLoad(POBJGPU pGpu, struct OBJTMR *pEngstate, NvU32 arg0) {
|
||||
return engstateStatePostLoad(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJTMR_OBJENGSTATE.offset), arg0);
|
||||
}
|
||||
|
||||
static NV_STATUS __nvoc_thunk_OBJENGSTATE_tmrAllocTunableState(POBJGPU pGpu, struct OBJTMR *pEngstate, void **ppTunableState) {
|
||||
return engstateAllocTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJTMR_OBJENGSTATE.offset), ppTunableState);
|
||||
}
|
||||
|
||||
static NV_STATUS __nvoc_thunk_OBJENGSTATE_tmrSetTunableState(POBJGPU pGpu, struct OBJTMR *pEngstate, void *pTunableState) {
|
||||
return engstateSetTunableState(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJTMR_OBJENGSTATE.offset), pTunableState);
|
||||
}
|
||||
|
||||
static NvBool __nvoc_thunk_OBJENGSTATE_tmrIsPresent(POBJGPU pGpu, struct OBJTMR *pEngstate) {
|
||||
return engstateIsPresent(pGpu, (struct OBJENGSTATE *)(((unsigned char *)pEngstate) + __nvoc_rtti_OBJTMR_OBJENGSTATE.offset));
|
||||
}
|
||||
|
||||
const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJTMR =
|
||||
{
|
||||
/*numEntries=*/ 0,
|
||||
/*pExportEntries=*/ 0
|
||||
};
|
||||
|
||||
void __nvoc_dtor_OBJENGSTATE(OBJENGSTATE*);
|
||||
void __nvoc_dtor_OBJTMR(OBJTMR *pThis) {
|
||||
__nvoc_tmrDestruct(pThis);
|
||||
__nvoc_dtor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE);
|
||||
PORT_UNREFERENCED_VARIABLE(pThis);
|
||||
}
|
||||
|
||||
void __nvoc_init_dataField_OBJTMR(OBJTMR *pThis, RmHalspecOwner *pRmhalspecowner) {
|
||||
ChipHal *chipHal = &pRmhalspecowner->chipHal;
|
||||
const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx;
|
||||
RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal;
|
||||
const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx;
|
||||
PORT_UNREFERENCED_VARIABLE(pThis);
|
||||
PORT_UNREFERENCED_VARIABLE(pRmhalspecowner);
|
||||
PORT_UNREFERENCED_VARIABLE(chipHal);
|
||||
PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx);
|
||||
PORT_UNREFERENCED_VARIABLE(rmVariantHal);
|
||||
PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx);
|
||||
|
||||
// NVOC Property Hal field -- PDB_PROP_TMR_USE_COUNTDOWN_TIMER_FOR_RM_CALLBACKS
|
||||
if (( ((chipHal_HalVarIdx >> 5) == 2UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00010000UL) )) /* ChipHal: T234D */
|
||||
{
|
||||
pThis->setProperty(pThis, PDB_PROP_TMR_USE_COUNTDOWN_TIMER_FOR_RM_CALLBACKS, ((NvBool)(0 == 0)));
|
||||
}
|
||||
// default
|
||||
else
|
||||
{
|
||||
pThis->setProperty(pThis, PDB_PROP_TMR_USE_COUNTDOWN_TIMER_FOR_RM_CALLBACKS, ((NvBool)(0 != 0)));
|
||||
}
|
||||
|
||||
// NVOC Property Hal field -- PDB_PROP_TMR_ALARM_INTR_REMOVED_FROM_PMC_TREE
|
||||
if (( ((chipHal_HalVarIdx >> 5) == 2UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00010000UL) )) /* ChipHal: T234D */
|
||||
{
|
||||
pThis->setProperty(pThis, PDB_PROP_TMR_ALARM_INTR_REMOVED_FROM_PMC_TREE, ((NvBool)(0 == 0)));
|
||||
}
|
||||
// default
|
||||
else
|
||||
{
|
||||
pThis->setProperty(pThis, PDB_PROP_TMR_ALARM_INTR_REMOVED_FROM_PMC_TREE, ((NvBool)(0 != 0)));
|
||||
}
|
||||
|
||||
// NVOC Property Hal field -- PDB_PROP_TMR_USE_OS_TIMER_FOR_CALLBACKS
|
||||
if (( ((chipHal_HalVarIdx >> 5) == 2UL) && ((1UL << (chipHal_HalVarIdx & 0x1f)) & 0x00010000UL) )) /* ChipHal: T234D */
|
||||
{
|
||||
pThis->setProperty(pThis, PDB_PROP_TMR_USE_OS_TIMER_FOR_CALLBACKS, ((NvBool)(0 == 0)));
|
||||
}
|
||||
// default
|
||||
else
|
||||
{
|
||||
pThis->setProperty(pThis, PDB_PROP_TMR_USE_OS_TIMER_FOR_CALLBACKS, ((NvBool)(0 != 0)));
|
||||
}
|
||||
pThis->setProperty(pThis, PDB_PROP_TMR_USE_PTIMER_FOR_OSTIMER_CALLBACKS, (0));
|
||||
    pThis->setProperty(pThis, PDB_PROP_TMR_USE_POLLING_FOR_CALLBACKS, (0));

    // NVOC Property Hal field -- PDB_PROP_TMR_USE_SECOND_COUNTDOWN_TIMER_FOR_SWRL
    if (0)
    {
    }
    // default
    else
    {
        pThis->setProperty(pThis, PDB_PROP_TMR_USE_SECOND_COUNTDOWN_TIMER_FOR_SWRL, ((NvBool)(0 != 0)));
    }
}

NV_STATUS __nvoc_ctor_OBJENGSTATE(OBJENGSTATE* );
NV_STATUS __nvoc_ctor_OBJTMR(OBJTMR *pThis, RmHalspecOwner *pRmhalspecowner) {
    NV_STATUS status = NV_OK;
    status = __nvoc_ctor_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE);
    if (status != NV_OK) goto __nvoc_ctor_OBJTMR_fail_OBJENGSTATE;
    __nvoc_init_dataField_OBJTMR(pThis, pRmhalspecowner);
    goto __nvoc_ctor_OBJTMR_exit; // Success

__nvoc_ctor_OBJTMR_fail_OBJENGSTATE:
__nvoc_ctor_OBJTMR_exit:

    return status;
}

static void __nvoc_init_funcTable_OBJTMR_1(OBJTMR *pThis, RmHalspecOwner *pRmhalspecowner) {
    ChipHal *chipHal = &pRmhalspecowner->chipHal;
    const unsigned long chipHal_HalVarIdx = (unsigned long)chipHal->__nvoc_HalVarIdx;
    RmVariantHal *rmVariantHal = &pRmhalspecowner->rmVariantHal;
    const unsigned long rmVariantHal_HalVarIdx = (unsigned long)rmVariantHal->__nvoc_HalVarIdx;
    PORT_UNREFERENCED_VARIABLE(pThis);
    PORT_UNREFERENCED_VARIABLE(pRmhalspecowner);
    PORT_UNREFERENCED_VARIABLE(chipHal);
    PORT_UNREFERENCED_VARIABLE(chipHal_HalVarIdx);
    PORT_UNREFERENCED_VARIABLE(rmVariantHal);
    PORT_UNREFERENCED_VARIABLE(rmVariantHal_HalVarIdx);

    pThis->__tmrConstructEngine__ = &tmrConstructEngine_IMPL;

    pThis->__tmrStateInitLocked__ = &tmrStateInitLocked_IMPL;

    pThis->__tmrStateLoad__ = &tmrStateLoad_IMPL;

    pThis->__tmrStateUnload__ = &tmrStateUnload_IMPL;

    pThis->__tmrStateDestroy__ = &tmrStateDestroy_IMPL;

    pThis->__nvoc_base_OBJENGSTATE.__engstateConstructEngine__ = &__nvoc_thunk_OBJTMR_engstateConstructEngine;

    pThis->__nvoc_base_OBJENGSTATE.__engstateStateInitLocked__ = &__nvoc_thunk_OBJTMR_engstateStateInitLocked;

    pThis->__nvoc_base_OBJENGSTATE.__engstateStateLoad__ = &__nvoc_thunk_OBJTMR_engstateStateLoad;

    pThis->__nvoc_base_OBJENGSTATE.__engstateStateUnload__ = &__nvoc_thunk_OBJTMR_engstateStateUnload;

    pThis->__nvoc_base_OBJENGSTATE.__engstateStateDestroy__ = &__nvoc_thunk_OBJTMR_engstateStateDestroy;

    pThis->__tmrReconcileTunableState__ = &__nvoc_thunk_OBJENGSTATE_tmrReconcileTunableState;

    pThis->__tmrStatePreLoad__ = &__nvoc_thunk_OBJENGSTATE_tmrStatePreLoad;

    pThis->__tmrStatePostUnload__ = &__nvoc_thunk_OBJENGSTATE_tmrStatePostUnload;

    pThis->__tmrStatePreUnload__ = &__nvoc_thunk_OBJENGSTATE_tmrStatePreUnload;

    pThis->__tmrStateInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_tmrStateInitUnlocked;

    pThis->__tmrInitMissing__ = &__nvoc_thunk_OBJENGSTATE_tmrInitMissing;

    pThis->__tmrStatePreInitLocked__ = &__nvoc_thunk_OBJENGSTATE_tmrStatePreInitLocked;

    pThis->__tmrStatePreInitUnlocked__ = &__nvoc_thunk_OBJENGSTATE_tmrStatePreInitUnlocked;

    pThis->__tmrGetTunableState__ = &__nvoc_thunk_OBJENGSTATE_tmrGetTunableState;

    pThis->__tmrCompareTunableState__ = &__nvoc_thunk_OBJENGSTATE_tmrCompareTunableState;

    pThis->__tmrFreeTunableState__ = &__nvoc_thunk_OBJENGSTATE_tmrFreeTunableState;

    pThis->__tmrStatePostLoad__ = &__nvoc_thunk_OBJENGSTATE_tmrStatePostLoad;

    pThis->__tmrAllocTunableState__ = &__nvoc_thunk_OBJENGSTATE_tmrAllocTunableState;

    pThis->__tmrSetTunableState__ = &__nvoc_thunk_OBJENGSTATE_tmrSetTunableState;

    pThis->__tmrIsPresent__ = &__nvoc_thunk_OBJENGSTATE_tmrIsPresent;
}

void __nvoc_init_funcTable_OBJTMR(OBJTMR *pThis, RmHalspecOwner *pRmhalspecowner) {
    __nvoc_init_funcTable_OBJTMR_1(pThis, pRmhalspecowner);
}

void __nvoc_init_OBJENGSTATE(OBJENGSTATE*);
void __nvoc_init_OBJTMR(OBJTMR *pThis, RmHalspecOwner *pRmhalspecowner) {
    pThis->__nvoc_pbase_OBJTMR = pThis;
    pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object;
    pThis->__nvoc_pbase_OBJENGSTATE = &pThis->__nvoc_base_OBJENGSTATE;
    __nvoc_init_OBJENGSTATE(&pThis->__nvoc_base_OBJENGSTATE);
    __nvoc_init_funcTable_OBJTMR(pThis, pRmhalspecowner);
}

NV_STATUS __nvoc_objCreate_OBJTMR(OBJTMR **ppThis, Dynamic *pParent, NvU32 createFlags) {
    NV_STATUS status;
    Object *pParentObj;
    OBJTMR *pThis;
    RmHalspecOwner *pRmhalspecowner;

    pThis = portMemAllocNonPaged(sizeof(OBJTMR));
    if (pThis == NULL) return NV_ERR_NO_MEMORY;

    portMemSet(pThis, 0, sizeof(OBJTMR));

    __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_OBJTMR);

    if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
    {
        pParentObj = dynamicCast(pParent, Object);
        objAddChild(pParentObj, &pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object);
    }
    else
    {
        pThis->__nvoc_base_OBJENGSTATE.__nvoc_base_Object.pParent = NULL;
    }

    if ((pRmhalspecowner = dynamicCast(pParent, RmHalspecOwner)) == NULL)
        pRmhalspecowner = objFindAncestorOfType(RmHalspecOwner, pParent);
    NV_ASSERT_OR_RETURN(pRmhalspecowner != NULL, NV_ERR_INVALID_ARGUMENT);

    __nvoc_init_OBJTMR(pThis, pRmhalspecowner);
    status = __nvoc_ctor_OBJTMR(pThis, pRmhalspecowner);
    if (status != NV_OK) goto __nvoc_objCreate_OBJTMR_cleanup;

    *ppThis = pThis;
    return NV_OK;

__nvoc_objCreate_OBJTMR_cleanup:
    // do not call destructors here since the constructor already called them
    portMemFree(pThis);
    return status;
}

NV_STATUS __nvoc_objCreateDynamic_OBJTMR(OBJTMR **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
    NV_STATUS status;

    status = __nvoc_objCreate_OBJTMR(ppThis, pParent, createFlags);

    return status;
}
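The function-table block above is the whole of NVOC's virtual dispatch: the derived class's own slots get the `*_IMPL` routines, while the embedded base class's slots are repointed at `__nvoc_thunk_*` helpers that convert a base-class pointer back into the derived pointer before calling the implementation. A minimal standalone sketch of that pointer-adjustment pattern, using hypothetical `Base`/`Derived` names rather than the driver's real types:

#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-ins for OBJENGSTATE/OBJTMR: a base object whose
 * vtable slot is repointed at a thunk that recovers the derived object. */
typedef struct Base {
    int (*stateLoad)(struct Base *);
} Base;

typedef struct Derived {
    Base base;          /* embedded base, as OBJTMR embeds OBJENGSTATE */
    int  loadCount;
} Derived;

static int derivedStateLoad(Derived *d) {   /* plays the role of a *_IMPL routine */
    return ++d->loadCount;
}

/* The thunk: subtract the embedded base's offset to get the Derived*. */
static int thunk_Base_stateLoad(Base *b) {
    Derived *d = (Derived *)((unsigned char *)b - offsetof(Derived, base));
    return derivedStateLoad(d);
}

int main(void) {
    Derived d = {0};
    d.base.stateLoad = &thunk_Base_stateLoad;  /* like the funcTable init above */
    Base *asBase = &d.base;
    printf("%d\n", asBase->stateLoad(asBase)); /* prints 1 */
    return 0;
}

The inherited direction works the same way in reverse: slots such as `__tmrStatePreLoad__` above point at `__nvoc_thunk_OBJENGSTATE_*` helpers that add the offset and call the base-class implementation.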
@@ -1,323 +0,0 @@
#define NVOC_OS_DESC_MEM_H_PRIVATE_ACCESS_ALLOWED
#include "nvoc/runtime.h"
#include "nvoc/rtti.h"
#include "nvtypes.h"
#include "nvport/nvport.h"
#include "nvport/inline/util_valist.h"
#include "utils/nvassert.h"
#include "g_os_desc_mem_nvoc.h"

#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0xb3dacd = 1;
#endif

extern const struct NVOC_CLASS_DEF __nvoc_class_def_OsDescMemory;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_RsResource;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResourceCommon;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_RmResource;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_Memory;

void __nvoc_init_OsDescMemory(OsDescMemory*);
void __nvoc_init_funcTable_OsDescMemory(OsDescMemory*);
NV_STATUS __nvoc_ctor_OsDescMemory(OsDescMemory*, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams);
void __nvoc_init_dataField_OsDescMemory(OsDescMemory*);
void __nvoc_dtor_OsDescMemory(OsDescMemory*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_OsDescMemory;

static const struct NVOC_RTTI __nvoc_rtti_OsDescMemory_OsDescMemory = {
    /*pClassDef=*/ &__nvoc_class_def_OsDescMemory,
    /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OsDescMemory,
    /*offset=*/ 0,
};

static const struct NVOC_RTTI __nvoc_rtti_OsDescMemory_Object = {
    /*pClassDef=*/ &__nvoc_class_def_Object,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(OsDescMemory, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object),
};

static const struct NVOC_RTTI __nvoc_rtti_OsDescMemory_RsResource = {
    /*pClassDef=*/ &__nvoc_class_def_RsResource,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(OsDescMemory, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource),
};

static const struct NVOC_RTTI __nvoc_rtti_OsDescMemory_RmResourceCommon = {
    /*pClassDef=*/ &__nvoc_class_def_RmResourceCommon,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(OsDescMemory, __nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon),
};

static const struct NVOC_RTTI __nvoc_rtti_OsDescMemory_RmResource = {
    /*pClassDef=*/ &__nvoc_class_def_RmResource,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(OsDescMemory, __nvoc_base_Memory.__nvoc_base_RmResource),
};

static const struct NVOC_RTTI __nvoc_rtti_OsDescMemory_Memory = {
    /*pClassDef=*/ &__nvoc_class_def_Memory,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(OsDescMemory, __nvoc_base_Memory),
};

static const struct NVOC_CASTINFO __nvoc_castinfo_OsDescMemory = {
    /*numRelatives=*/ 6,
    /*relatives=*/ {
        &__nvoc_rtti_OsDescMemory_OsDescMemory,
        &__nvoc_rtti_OsDescMemory_Memory,
        &__nvoc_rtti_OsDescMemory_RmResource,
        &__nvoc_rtti_OsDescMemory_RmResourceCommon,
        &__nvoc_rtti_OsDescMemory_RsResource,
        &__nvoc_rtti_OsDescMemory_Object,
    },
};

const struct NVOC_CLASS_DEF __nvoc_class_def_OsDescMemory =
{
    /*classInfo=*/ {
        /*size=*/ sizeof(OsDescMemory),
        /*classId=*/ classId(OsDescMemory),
        /*providerId=*/ &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
        /*name=*/ "OsDescMemory",
#endif
    },
    /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_OsDescMemory,
    /*pCastInfo=*/ &__nvoc_castinfo_OsDescMemory,
    /*pExportInfo=*/ &__nvoc_export_info_OsDescMemory
};

static NvBool __nvoc_thunk_OsDescMemory_resCanCopy(struct RsResource *pOsDescMemory) {
    return osdescCanCopy((struct OsDescMemory *)(((unsigned char *)pOsDescMemory) - __nvoc_rtti_OsDescMemory_RsResource.offset));
}

static NV_STATUS __nvoc_thunk_Memory_osdescCheckMemInterUnmap(struct OsDescMemory *pMemory, NvBool bSubdeviceHandleProvided) {
    return memCheckMemInterUnmap((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_OsDescMemory_Memory.offset), bSubdeviceHandleProvided);
}

static NV_STATUS __nvoc_thunk_Memory_osdescControl(struct OsDescMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return memControl((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_OsDescMemory_Memory.offset), pCallContext, pParams);
}

static NV_STATUS __nvoc_thunk_Memory_osdescUnmap(struct OsDescMemory *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) {
    return memUnmap((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_OsDescMemory_Memory.offset), pCallContext, pCpuMapping);
}

static NV_STATUS __nvoc_thunk_Memory_osdescGetMemInterMapParams(struct OsDescMemory *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
    return memGetMemInterMapParams((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_OsDescMemory_Memory.offset), pParams);
}

static NV_STATUS __nvoc_thunk_Memory_osdescGetMemoryMappingDescriptor(struct OsDescMemory *pMemory, MEMORY_DESCRIPTOR **ppMemDesc) {
    return memGetMemoryMappingDescriptor((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_OsDescMemory_Memory.offset), ppMemDesc);
}

static NV_STATUS __nvoc_thunk_Memory_osdescGetMapAddrSpace(struct OsDescMemory *pMemory, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) {
    return memGetMapAddrSpace((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_OsDescMemory_Memory.offset), pCallContext, mapFlags, pAddrSpace);
}

static NvBool __nvoc_thunk_RmResource_osdescShareCallback(struct OsDescMemory *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
    return rmresShareCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_OsDescMemory_RmResource.offset), pInvokingClient, pParentRef, pSharePolicy);
}

static NV_STATUS __nvoc_thunk_RsResource_osdescControlFilter(struct OsDescMemory *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return resControlFilter((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_OsDescMemory_RsResource.offset), pCallContext, pParams);
}

static void __nvoc_thunk_RsResource_osdescAddAdditionalDependants(struct RsClient *pClient, struct OsDescMemory *pResource, RsResourceRef *pReference) {
    resAddAdditionalDependants(pClient, (struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_OsDescMemory_RsResource.offset), pReference);
}

static NvU32 __nvoc_thunk_RsResource_osdescGetRefCount(struct OsDescMemory *pResource) {
    return resGetRefCount((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_OsDescMemory_RsResource.offset));
}

static NV_STATUS __nvoc_thunk_RsResource_osdescMapTo(struct OsDescMemory *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
    return resMapTo((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_OsDescMemory_RsResource.offset), pParams);
}

static NV_STATUS __nvoc_thunk_RmResource_osdescControl_Prologue(struct OsDescMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return rmresControl_Prologue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_OsDescMemory_RmResource.offset), pCallContext, pParams);
}

static NV_STATUS __nvoc_thunk_Memory_osdescIsReady(struct OsDescMemory *pMemory) {
    return memIsReady((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_OsDescMemory_Memory.offset));
}

static NV_STATUS __nvoc_thunk_Memory_osdescCheckCopyPermissions(struct OsDescMemory *pMemory, struct OBJGPU *pDstGpu, NvHandle hDstClientNvBool) {
    return memCheckCopyPermissions((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_OsDescMemory_Memory.offset), pDstGpu, hDstClientNvBool);
}

static void __nvoc_thunk_RsResource_osdescPreDestruct(struct OsDescMemory *pResource) {
    resPreDestruct((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_OsDescMemory_RsResource.offset));
}

static NV_STATUS __nvoc_thunk_RsResource_osdescUnmapFrom(struct OsDescMemory *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
    return resUnmapFrom((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_OsDescMemory_RsResource.offset), pParams);
}

static void __nvoc_thunk_RmResource_osdescControl_Epilogue(struct OsDescMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    rmresControl_Epilogue((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_OsDescMemory_RmResource.offset), pCallContext, pParams);
}

static NV_STATUS __nvoc_thunk_RsResource_osdescControlLookup(struct OsDescMemory *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) {
    return resControlLookup((struct RsResource *)(((unsigned char *)pResource) + __nvoc_rtti_OsDescMemory_RsResource.offset), pParams, ppEntry);
}

static NV_STATUS __nvoc_thunk_Memory_osdescMap(struct OsDescMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) {
    return memMap((struct Memory *)(((unsigned char *)pMemory) + __nvoc_rtti_OsDescMemory_Memory.offset), pCallContext, pParams, pCpuMapping);
}

static NvBool __nvoc_thunk_RmResource_osdescAccessCallback(struct OsDescMemory *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
    return rmresAccessCallback((struct RmResource *)(((unsigned char *)pResource) + __nvoc_rtti_OsDescMemory_RmResource.offset), pInvokingClient, pAllocParams, accessRight);
}

const struct NVOC_EXPORT_INFO __nvoc_export_info_OsDescMemory =
{
    /*numEntries=*/ 0,
    /*pExportEntries=*/ 0
};

void __nvoc_dtor_Memory(Memory*);
void __nvoc_dtor_OsDescMemory(OsDescMemory *pThis) {
    __nvoc_dtor_Memory(&pThis->__nvoc_base_Memory);
    PORT_UNREFERENCED_VARIABLE(pThis);
}

void __nvoc_init_dataField_OsDescMemory(OsDescMemory *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);
}

NV_STATUS __nvoc_ctor_Memory(Memory* , CALL_CONTEXT *, struct RS_RES_ALLOC_PARAMS_INTERNAL *);
NV_STATUS __nvoc_ctor_OsDescMemory(OsDescMemory *pThis, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
    NV_STATUS status = NV_OK;
    status = __nvoc_ctor_Memory(&pThis->__nvoc_base_Memory, arg_pCallContext, arg_pParams);
    if (status != NV_OK) goto __nvoc_ctor_OsDescMemory_fail_Memory;
    __nvoc_init_dataField_OsDescMemory(pThis);

    status = __nvoc_osdescConstruct(pThis, arg_pCallContext, arg_pParams);
    if (status != NV_OK) goto __nvoc_ctor_OsDescMemory_fail__init;
    goto __nvoc_ctor_OsDescMemory_exit; // Success

__nvoc_ctor_OsDescMemory_fail__init:
    __nvoc_dtor_Memory(&pThis->__nvoc_base_Memory);
__nvoc_ctor_OsDescMemory_fail_Memory:
__nvoc_ctor_OsDescMemory_exit:

    return status;
}

static void __nvoc_init_funcTable_OsDescMemory_1(OsDescMemory *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);

    pThis->__osdescCanCopy__ = &osdescCanCopy_IMPL;

    pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__resCanCopy__ = &__nvoc_thunk_OsDescMemory_resCanCopy;

    pThis->__osdescCheckMemInterUnmap__ = &__nvoc_thunk_Memory_osdescCheckMemInterUnmap;

    pThis->__osdescControl__ = &__nvoc_thunk_Memory_osdescControl;

    pThis->__osdescUnmap__ = &__nvoc_thunk_Memory_osdescUnmap;

    pThis->__osdescGetMemInterMapParams__ = &__nvoc_thunk_Memory_osdescGetMemInterMapParams;

    pThis->__osdescGetMemoryMappingDescriptor__ = &__nvoc_thunk_Memory_osdescGetMemoryMappingDescriptor;

    pThis->__osdescGetMapAddrSpace__ = &__nvoc_thunk_Memory_osdescGetMapAddrSpace;

    pThis->__osdescShareCallback__ = &__nvoc_thunk_RmResource_osdescShareCallback;

    pThis->__osdescControlFilter__ = &__nvoc_thunk_RsResource_osdescControlFilter;

    pThis->__osdescAddAdditionalDependants__ = &__nvoc_thunk_RsResource_osdescAddAdditionalDependants;

    pThis->__osdescGetRefCount__ = &__nvoc_thunk_RsResource_osdescGetRefCount;

    pThis->__osdescMapTo__ = &__nvoc_thunk_RsResource_osdescMapTo;

    pThis->__osdescControl_Prologue__ = &__nvoc_thunk_RmResource_osdescControl_Prologue;

    pThis->__osdescIsReady__ = &__nvoc_thunk_Memory_osdescIsReady;

    pThis->__osdescCheckCopyPermissions__ = &__nvoc_thunk_Memory_osdescCheckCopyPermissions;

    pThis->__osdescPreDestruct__ = &__nvoc_thunk_RsResource_osdescPreDestruct;

    pThis->__osdescUnmapFrom__ = &__nvoc_thunk_RsResource_osdescUnmapFrom;

    pThis->__osdescControl_Epilogue__ = &__nvoc_thunk_RmResource_osdescControl_Epilogue;

    pThis->__osdescControlLookup__ = &__nvoc_thunk_RsResource_osdescControlLookup;

    pThis->__osdescMap__ = &__nvoc_thunk_Memory_osdescMap;

    pThis->__osdescAccessCallback__ = &__nvoc_thunk_RmResource_osdescAccessCallback;
}

void __nvoc_init_funcTable_OsDescMemory(OsDescMemory *pThis) {
    __nvoc_init_funcTable_OsDescMemory_1(pThis);
}

void __nvoc_init_Memory(Memory*);
void __nvoc_init_OsDescMemory(OsDescMemory *pThis) {
    pThis->__nvoc_pbase_OsDescMemory = pThis;
    pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object;
    pThis->__nvoc_pbase_RsResource = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource;
    pThis->__nvoc_pbase_RmResourceCommon = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RmResourceCommon;
    pThis->__nvoc_pbase_RmResource = &pThis->__nvoc_base_Memory.__nvoc_base_RmResource;
    pThis->__nvoc_pbase_Memory = &pThis->__nvoc_base_Memory;
    __nvoc_init_Memory(&pThis->__nvoc_base_Memory);
    __nvoc_init_funcTable_OsDescMemory(pThis);
}

NV_STATUS __nvoc_objCreate_OsDescMemory(OsDescMemory **ppThis, Dynamic *pParent, NvU32 createFlags, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams) {
    NV_STATUS status;
    Object *pParentObj;
    OsDescMemory *pThis;

    pThis = portMemAllocNonPaged(sizeof(OsDescMemory));
    if (pThis == NULL) return NV_ERR_NO_MEMORY;

    portMemSet(pThis, 0, sizeof(OsDescMemory));

    __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_OsDescMemory);

    if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
    {
        pParentObj = dynamicCast(pParent, Object);
        objAddChild(pParentObj, &pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object);
    }
    else
    {
        pThis->__nvoc_base_Memory.__nvoc_base_RmResource.__nvoc_base_RsResource.__nvoc_base_Object.pParent = NULL;
    }

    __nvoc_init_OsDescMemory(pThis);
    status = __nvoc_ctor_OsDescMemory(pThis, arg_pCallContext, arg_pParams);
    if (status != NV_OK) goto __nvoc_objCreate_OsDescMemory_cleanup;

    *ppThis = pThis;
    return NV_OK;

__nvoc_objCreate_OsDescMemory_cleanup:
    // do not call destructors here since the constructor already called them
    portMemFree(pThis);
    return status;
}

NV_STATUS __nvoc_objCreateDynamic_OsDescMemory(OsDescMemory **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
    NV_STATUS status;
    CALL_CONTEXT * arg_pCallContext = va_arg(args, CALL_CONTEXT *);
    struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams = va_arg(args, struct RS_RES_ALLOC_PARAMS_INTERNAL *);

    status = __nvoc_objCreate_OsDescMemory(ppThis, pParent, createFlags, arg_pCallContext, arg_pParams);

    return status;
}
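`__nvoc_objCreateDynamic_OsDescMemory` exists so the class can be constructed through the generic `objCreatefn` slot in its `NVOC_CLASS_DEF`: the caller passes per-class constructor arguments as varargs, and the generated stub peels them off with `va_arg` in declaration order (here a `CALL_CONTEXT *` followed by an `RS_RES_ALLOC_PARAMS_INTERNAL *`) before forwarding to the typed create function. A compilable sketch of the same convention, with hypothetical `Widget` names in place of the driver's real types:

#include <stdarg.h>
#include <stdio.h>

/* Hypothetical mirror of the objCreateDynamic pattern: a generic varargs
 * entry point unpacks class-specific constructor arguments and forwards
 * them to the typed create function. */
typedef struct Widget { int id; const char *name; } Widget;

static int createWidget(Widget *w, int id, const char *name) {
    w->id = id;
    w->name = name;
    return 0;
}

static int createWidgetDynamic(Widget *w, va_list args) {
    int id = va_arg(args, int);                    /* like arg_pCallContext */
    const char *name = va_arg(args, const char *); /* like arg_pParams */
    return createWidget(w, id, name);
}

static int createAny(Widget *w, ...) {  /* the generic dispatch entry */
    va_list args;
    va_start(args, w);
    int rc = createWidgetDynamic(w, args);
    va_end(args);
    return rc;
}

int main(void) {
    Widget w;
    createAny(&w, 7, "timer");
    printf("%d %s\n", w.id, w.name);  /* prints: 7 timer */
    return 0;
}

Because every `__nvoc_objCreateDynamic_*` stub has the same `(ppThis, pParent, createFlags, va_list)` shape, the runtime can create any registered class from its class definition alone.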
@@ -1,224 +0,0 @@
#ifndef _G_OS_DESC_MEM_NVOC_H_
#define _G_OS_DESC_MEM_NVOC_H_
#include "nvoc/runtime.h"

#ifdef __cplusplus
extern "C" {
#endif

/*
 * SPDX-FileCopyrightText: Copyright (c) 1993-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "g_os_desc_mem_nvoc.h"

#ifndef _OS_DESC_MEMORY_H_
#define _OS_DESC_MEMORY_H_

#include "mem_mgr/mem.h"

/*!
 * Bind memory allocated through os descriptor
 */
#ifdef NVOC_OS_DESC_MEM_H_PRIVATE_ACCESS_ALLOWED
#define PRIVATE_FIELD(x) x
#else
#define PRIVATE_FIELD(x) NVOC_PRIVATE_FIELD(x)
#endif
struct OsDescMemory {
    const struct NVOC_RTTI *__nvoc_rtti;
    struct Memory __nvoc_base_Memory;
    struct Object *__nvoc_pbase_Object;
    struct RsResource *__nvoc_pbase_RsResource;
    struct RmResourceCommon *__nvoc_pbase_RmResourceCommon;
    struct RmResource *__nvoc_pbase_RmResource;
    struct Memory *__nvoc_pbase_Memory;
    struct OsDescMemory *__nvoc_pbase_OsDescMemory;
    NvBool (*__osdescCanCopy__)(struct OsDescMemory *);
    NV_STATUS (*__osdescCheckMemInterUnmap__)(struct OsDescMemory *, NvBool);
    NV_STATUS (*__osdescControl__)(struct OsDescMemory *, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
    NV_STATUS (*__osdescUnmap__)(struct OsDescMemory *, CALL_CONTEXT *, RsCpuMapping *);
    NV_STATUS (*__osdescGetMemInterMapParams__)(struct OsDescMemory *, RMRES_MEM_INTER_MAP_PARAMS *);
    NV_STATUS (*__osdescGetMemoryMappingDescriptor__)(struct OsDescMemory *, MEMORY_DESCRIPTOR **);
    NV_STATUS (*__osdescGetMapAddrSpace__)(struct OsDescMemory *, CALL_CONTEXT *, NvU32, NV_ADDRESS_SPACE *);
    NvBool (*__osdescShareCallback__)(struct OsDescMemory *, struct RsClient *, struct RsResourceRef *, RS_SHARE_POLICY *);
    NV_STATUS (*__osdescControlFilter__)(struct OsDescMemory *, struct CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
    void (*__osdescAddAdditionalDependants__)(struct RsClient *, struct OsDescMemory *, RsResourceRef *);
    NvU32 (*__osdescGetRefCount__)(struct OsDescMemory *);
    NV_STATUS (*__osdescMapTo__)(struct OsDescMemory *, RS_RES_MAP_TO_PARAMS *);
    NV_STATUS (*__osdescControl_Prologue__)(struct OsDescMemory *, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
    NV_STATUS (*__osdescIsReady__)(struct OsDescMemory *);
    NV_STATUS (*__osdescCheckCopyPermissions__)(struct OsDescMemory *, struct OBJGPU *, NvHandle);
    void (*__osdescPreDestruct__)(struct OsDescMemory *);
    NV_STATUS (*__osdescUnmapFrom__)(struct OsDescMemory *, RS_RES_UNMAP_FROM_PARAMS *);
    void (*__osdescControl_Epilogue__)(struct OsDescMemory *, CALL_CONTEXT *, struct RS_RES_CONTROL_PARAMS_INTERNAL *);
    NV_STATUS (*__osdescControlLookup__)(struct OsDescMemory *, struct RS_RES_CONTROL_PARAMS_INTERNAL *, const struct NVOC_EXPORTED_METHOD_DEF **);
    NV_STATUS (*__osdescMap__)(struct OsDescMemory *, CALL_CONTEXT *, struct RS_CPU_MAP_PARAMS *, RsCpuMapping *);
    NvBool (*__osdescAccessCallback__)(struct OsDescMemory *, struct RsClient *, void *, RsAccessRight);
};

#ifndef __NVOC_CLASS_OsDescMemory_TYPEDEF__
#define __NVOC_CLASS_OsDescMemory_TYPEDEF__
typedef struct OsDescMemory OsDescMemory;
#endif /* __NVOC_CLASS_OsDescMemory_TYPEDEF__ */

#ifndef __nvoc_class_id_OsDescMemory
#define __nvoc_class_id_OsDescMemory 0xb3dacd
#endif /* __nvoc_class_id_OsDescMemory */

extern const struct NVOC_CLASS_DEF __nvoc_class_def_OsDescMemory;

#define __staticCast_OsDescMemory(pThis) \
    ((pThis)->__nvoc_pbase_OsDescMemory)

#ifdef __nvoc_os_desc_mem_h_disabled
#define __dynamicCast_OsDescMemory(pThis) ((OsDescMemory*)NULL)
#else //__nvoc_os_desc_mem_h_disabled
#define __dynamicCast_OsDescMemory(pThis) \
    ((OsDescMemory*)__nvoc_dynamicCast(staticCast((pThis), Dynamic), classInfo(OsDescMemory)))
#endif //__nvoc_os_desc_mem_h_disabled


NV_STATUS __nvoc_objCreateDynamic_OsDescMemory(OsDescMemory**, Dynamic*, NvU32, va_list);

NV_STATUS __nvoc_objCreate_OsDescMemory(OsDescMemory**, Dynamic*, NvU32, CALL_CONTEXT * arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL * arg_pParams);
#define __objCreate_OsDescMemory(ppNewObj, pParent, createFlags, arg_pCallContext, arg_pParams) \
    __nvoc_objCreate_OsDescMemory((ppNewObj), staticCast((pParent), Dynamic), (createFlags), arg_pCallContext, arg_pParams)

#define osdescCanCopy(pOsDescMemory) osdescCanCopy_DISPATCH(pOsDescMemory)
#define osdescCheckMemInterUnmap(pMemory, bSubdeviceHandleProvided) osdescCheckMemInterUnmap_DISPATCH(pMemory, bSubdeviceHandleProvided)
#define osdescControl(pMemory, pCallContext, pParams) osdescControl_DISPATCH(pMemory, pCallContext, pParams)
#define osdescUnmap(pMemory, pCallContext, pCpuMapping) osdescUnmap_DISPATCH(pMemory, pCallContext, pCpuMapping)
#define osdescGetMemInterMapParams(pMemory, pParams) osdescGetMemInterMapParams_DISPATCH(pMemory, pParams)
#define osdescGetMemoryMappingDescriptor(pMemory, ppMemDesc) osdescGetMemoryMappingDescriptor_DISPATCH(pMemory, ppMemDesc)
#define osdescGetMapAddrSpace(pMemory, pCallContext, mapFlags, pAddrSpace) osdescGetMapAddrSpace_DISPATCH(pMemory, pCallContext, mapFlags, pAddrSpace)
#define osdescShareCallback(pResource, pInvokingClient, pParentRef, pSharePolicy) osdescShareCallback_DISPATCH(pResource, pInvokingClient, pParentRef, pSharePolicy)
#define osdescControlFilter(pResource, pCallContext, pParams) osdescControlFilter_DISPATCH(pResource, pCallContext, pParams)
#define osdescAddAdditionalDependants(pClient, pResource, pReference) osdescAddAdditionalDependants_DISPATCH(pClient, pResource, pReference)
#define osdescGetRefCount(pResource) osdescGetRefCount_DISPATCH(pResource)
#define osdescMapTo(pResource, pParams) osdescMapTo_DISPATCH(pResource, pParams)
#define osdescControl_Prologue(pResource, pCallContext, pParams) osdescControl_Prologue_DISPATCH(pResource, pCallContext, pParams)
#define osdescIsReady(pMemory) osdescIsReady_DISPATCH(pMemory)
#define osdescCheckCopyPermissions(pMemory, pDstGpu, hDstClientNvBool) osdescCheckCopyPermissions_DISPATCH(pMemory, pDstGpu, hDstClientNvBool)
#define osdescPreDestruct(pResource) osdescPreDestruct_DISPATCH(pResource)
#define osdescUnmapFrom(pResource, pParams) osdescUnmapFrom_DISPATCH(pResource, pParams)
#define osdescControl_Epilogue(pResource, pCallContext, pParams) osdescControl_Epilogue_DISPATCH(pResource, pCallContext, pParams)
#define osdescControlLookup(pResource, pParams, ppEntry) osdescControlLookup_DISPATCH(pResource, pParams, ppEntry)
#define osdescMap(pMemory, pCallContext, pParams, pCpuMapping) osdescMap_DISPATCH(pMemory, pCallContext, pParams, pCpuMapping)
#define osdescAccessCallback(pResource, pInvokingClient, pAllocParams, accessRight) osdescAccessCallback_DISPATCH(pResource, pInvokingClient, pAllocParams, accessRight)
NvBool osdescCanCopy_IMPL(struct OsDescMemory *pOsDescMemory);

static inline NvBool osdescCanCopy_DISPATCH(struct OsDescMemory *pOsDescMemory) {
    return pOsDescMemory->__osdescCanCopy__(pOsDescMemory);
}

static inline NV_STATUS osdescCheckMemInterUnmap_DISPATCH(struct OsDescMemory *pMemory, NvBool bSubdeviceHandleProvided) {
    return pMemory->__osdescCheckMemInterUnmap__(pMemory, bSubdeviceHandleProvided);
}

static inline NV_STATUS osdescControl_DISPATCH(struct OsDescMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return pMemory->__osdescControl__(pMemory, pCallContext, pParams);
}

static inline NV_STATUS osdescUnmap_DISPATCH(struct OsDescMemory *pMemory, CALL_CONTEXT *pCallContext, RsCpuMapping *pCpuMapping) {
    return pMemory->__osdescUnmap__(pMemory, pCallContext, pCpuMapping);
}

static inline NV_STATUS osdescGetMemInterMapParams_DISPATCH(struct OsDescMemory *pMemory, RMRES_MEM_INTER_MAP_PARAMS *pParams) {
    return pMemory->__osdescGetMemInterMapParams__(pMemory, pParams);
}

static inline NV_STATUS osdescGetMemoryMappingDescriptor_DISPATCH(struct OsDescMemory *pMemory, MEMORY_DESCRIPTOR **ppMemDesc) {
    return pMemory->__osdescGetMemoryMappingDescriptor__(pMemory, ppMemDesc);
}

static inline NV_STATUS osdescGetMapAddrSpace_DISPATCH(struct OsDescMemory *pMemory, CALL_CONTEXT *pCallContext, NvU32 mapFlags, NV_ADDRESS_SPACE *pAddrSpace) {
    return pMemory->__osdescGetMapAddrSpace__(pMemory, pCallContext, mapFlags, pAddrSpace);
}

static inline NvBool osdescShareCallback_DISPATCH(struct OsDescMemory *pResource, struct RsClient *pInvokingClient, struct RsResourceRef *pParentRef, RS_SHARE_POLICY *pSharePolicy) {
    return pResource->__osdescShareCallback__(pResource, pInvokingClient, pParentRef, pSharePolicy);
}

static inline NV_STATUS osdescControlFilter_DISPATCH(struct OsDescMemory *pResource, struct CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return pResource->__osdescControlFilter__(pResource, pCallContext, pParams);
}

static inline void osdescAddAdditionalDependants_DISPATCH(struct RsClient *pClient, struct OsDescMemory *pResource, RsResourceRef *pReference) {
    pResource->__osdescAddAdditionalDependants__(pClient, pResource, pReference);
}

static inline NvU32 osdescGetRefCount_DISPATCH(struct OsDescMemory *pResource) {
    return pResource->__osdescGetRefCount__(pResource);
}

static inline NV_STATUS osdescMapTo_DISPATCH(struct OsDescMemory *pResource, RS_RES_MAP_TO_PARAMS *pParams) {
    return pResource->__osdescMapTo__(pResource, pParams);
}

static inline NV_STATUS osdescControl_Prologue_DISPATCH(struct OsDescMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    return pResource->__osdescControl_Prologue__(pResource, pCallContext, pParams);
}

static inline NV_STATUS osdescIsReady_DISPATCH(struct OsDescMemory *pMemory) {
    return pMemory->__osdescIsReady__(pMemory);
}

static inline NV_STATUS osdescCheckCopyPermissions_DISPATCH(struct OsDescMemory *pMemory, struct OBJGPU *pDstGpu, NvHandle hDstClientNvBool) {
    return pMemory->__osdescCheckCopyPermissions__(pMemory, pDstGpu, hDstClientNvBool);
}

static inline void osdescPreDestruct_DISPATCH(struct OsDescMemory *pResource) {
    pResource->__osdescPreDestruct__(pResource);
}

static inline NV_STATUS osdescUnmapFrom_DISPATCH(struct OsDescMemory *pResource, RS_RES_UNMAP_FROM_PARAMS *pParams) {
    return pResource->__osdescUnmapFrom__(pResource, pParams);
}

static inline void osdescControl_Epilogue_DISPATCH(struct OsDescMemory *pResource, CALL_CONTEXT *pCallContext, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams) {
    pResource->__osdescControl_Epilogue__(pResource, pCallContext, pParams);
}

static inline NV_STATUS osdescControlLookup_DISPATCH(struct OsDescMemory *pResource, struct RS_RES_CONTROL_PARAMS_INTERNAL *pParams, const struct NVOC_EXPORTED_METHOD_DEF **ppEntry) {
    return pResource->__osdescControlLookup__(pResource, pParams, ppEntry);
}

static inline NV_STATUS osdescMap_DISPATCH(struct OsDescMemory *pMemory, CALL_CONTEXT *pCallContext, struct RS_CPU_MAP_PARAMS *pParams, RsCpuMapping *pCpuMapping) {
    return pMemory->__osdescMap__(pMemory, pCallContext, pParams, pCpuMapping);
}

static inline NvBool osdescAccessCallback_DISPATCH(struct OsDescMemory *pResource, struct RsClient *pInvokingClient, void *pAllocParams, RsAccessRight accessRight) {
    return pResource->__osdescAccessCallback__(pResource, pInvokingClient, pAllocParams, accessRight);
}

NV_STATUS osdescConstruct_IMPL(struct OsDescMemory *arg_pOsDescMemory, CALL_CONTEXT *arg_pCallContext, struct RS_RES_ALLOC_PARAMS_INTERNAL *arg_pParams);
#define __nvoc_osdescConstruct(arg_pOsDescMemory, arg_pCallContext, arg_pParams) osdescConstruct_IMPL(arg_pOsDescMemory, arg_pCallContext, arg_pParams)
#undef PRIVATE_FIELD


#endif

#ifdef __cplusplus
} // extern "C"
#endif
#endif // _G_OS_DESC_MEM_NVOC_H_
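Callers never touch the `__osdesc*__` slots directly; each public name such as `osdescCanCopy` is a macro onto a `_DISPATCH` inline that indirects through the per-object function pointer, so overriding a method amounts to repointing one slot at init time. A small self-contained sketch of that macro-plus-inline convention, with hypothetical names rather than the header's real types:

#include <stdio.h>

/* Hypothetical mirror of the *_DISPATCH convention: the public name is a
 * macro over an inline that indirects through the object's own slot. */
typedef struct Counter Counter;
struct Counter {
    int value;
    int (*__counterNext__)(Counter *);  /* the per-object method slot */
};

#define counterNext(p) counterNext_DISPATCH(p)

static inline int counterNext_DISPATCH(Counter *p) {
    return p->__counterNext__(p);       /* virtual call through the slot */
}

static int counterNext_IMPL(Counter *p) {  /* the concrete implementation */
    return ++p->value;
}

int main(void) {
    Counter c = { 0, &counterNext_IMPL };   /* like a funcTable init */
    printf("%d\n", counterNext(&c));        /* prints 1 */
    printf("%d\n", counterNext(&c));        /* prints 2 */
    return 0;
}

The macro layer also lets the generated header compile the same call sites whether a method resolves to the class's own `*_IMPL` or to an inherited thunk.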
@@ -1,149 +0,0 @@
#define NVOC_OS_H_PRIVATE_ACCESS_ALLOWED
#include "nvoc/runtime.h"
#include "nvoc/rtti.h"
#include "nvtypes.h"
#include "nvport/nvport.h"
#include "nvport/inline/util_valist.h"
#include "utils/nvassert.h"
#include "g_os_nvoc.h"

#ifdef DEBUG
char __nvoc_class_id_uniqueness_check_0xaa1d70 = 1;
#endif

extern const struct NVOC_CLASS_DEF __nvoc_class_def_OBJOS;

extern const struct NVOC_CLASS_DEF __nvoc_class_def_Object;

void __nvoc_init_OBJOS(OBJOS*);
void __nvoc_init_funcTable_OBJOS(OBJOS*);
NV_STATUS __nvoc_ctor_OBJOS(OBJOS*);
void __nvoc_init_dataField_OBJOS(OBJOS*);
void __nvoc_dtor_OBJOS(OBJOS*);
extern const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJOS;

static const struct NVOC_RTTI __nvoc_rtti_OBJOS_OBJOS = {
    /*pClassDef=*/ &__nvoc_class_def_OBJOS,
    /*dtor=*/ (NVOC_DYNAMIC_DTOR) &__nvoc_dtor_OBJOS,
    /*offset=*/ 0,
};

static const struct NVOC_RTTI __nvoc_rtti_OBJOS_Object = {
    /*pClassDef=*/ &__nvoc_class_def_Object,
    /*dtor=*/ &__nvoc_destructFromBase,
    /*offset=*/ NV_OFFSETOF(OBJOS, __nvoc_base_Object),
};

static const struct NVOC_CASTINFO __nvoc_castinfo_OBJOS = {
    /*numRelatives=*/ 2,
    /*relatives=*/ {
        &__nvoc_rtti_OBJOS_OBJOS,
        &__nvoc_rtti_OBJOS_Object,
    },
};

const struct NVOC_CLASS_DEF __nvoc_class_def_OBJOS =
{
    /*classInfo=*/ {
        /*size=*/ sizeof(OBJOS),
        /*classId=*/ classId(OBJOS),
        /*providerId=*/ &__nvoc_rtti_provider,
#if NV_PRINTF_STRINGS_ALLOWED
        /*name=*/ "OBJOS",
#endif
    },
    /*objCreatefn=*/ (NVOC_DYNAMIC_OBJ_CREATE) &__nvoc_objCreateDynamic_OBJOS,
    /*pCastInfo=*/ &__nvoc_castinfo_OBJOS,
    /*pExportInfo=*/ &__nvoc_export_info_OBJOS
};

const struct NVOC_EXPORT_INFO __nvoc_export_info_OBJOS =
{
    /*numEntries=*/ 0,
    /*pExportEntries=*/ 0
};

void __nvoc_dtor_Object(Object*);
void __nvoc_dtor_OBJOS(OBJOS *pThis) {
    __nvoc_dtor_Object(&pThis->__nvoc_base_Object);
    PORT_UNREFERENCED_VARIABLE(pThis);
}

void __nvoc_init_dataField_OBJOS(OBJOS *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);
    pThis->setProperty(pThis, PDB_PROP_OS_SUPPORTS_DISPLAY_REMAPPER, !(1));
}

NV_STATUS __nvoc_ctor_Object(Object* );
NV_STATUS __nvoc_ctor_OBJOS(OBJOS *pThis) {
    NV_STATUS status = NV_OK;
    status = __nvoc_ctor_Object(&pThis->__nvoc_base_Object);
    if (status != NV_OK) goto __nvoc_ctor_OBJOS_fail_Object;
    __nvoc_init_dataField_OBJOS(pThis);
    goto __nvoc_ctor_OBJOS_exit; // Success

__nvoc_ctor_OBJOS_fail_Object:
__nvoc_ctor_OBJOS_exit:

    return status;
}

static void __nvoc_init_funcTable_OBJOS_1(OBJOS *pThis) {
    PORT_UNREFERENCED_VARIABLE(pThis);
}

void __nvoc_init_funcTable_OBJOS(OBJOS *pThis) {
    __nvoc_init_funcTable_OBJOS_1(pThis);
}

void __nvoc_init_Object(Object*);
void __nvoc_init_OBJOS(OBJOS *pThis) {
    pThis->__nvoc_pbase_OBJOS = pThis;
    pThis->__nvoc_pbase_Object = &pThis->__nvoc_base_Object;
    __nvoc_init_Object(&pThis->__nvoc_base_Object);
    __nvoc_init_funcTable_OBJOS(pThis);
}

NV_STATUS __nvoc_objCreate_OBJOS(OBJOS **ppThis, Dynamic *pParent, NvU32 createFlags) {
    NV_STATUS status;
    Object *pParentObj;
    OBJOS *pThis;

    pThis = portMemAllocNonPaged(sizeof(OBJOS));
    if (pThis == NULL) return NV_ERR_NO_MEMORY;

    portMemSet(pThis, 0, sizeof(OBJOS));

    __nvoc_initRtti(staticCast(pThis, Dynamic), &__nvoc_class_def_OBJOS);

    if (pParent != NULL && !(createFlags & NVOC_OBJ_CREATE_FLAGS_PARENT_HALSPEC_ONLY))
    {
        pParentObj = dynamicCast(pParent, Object);
        objAddChild(pParentObj, &pThis->__nvoc_base_Object);
    }
    else
    {
        pThis->__nvoc_base_Object.pParent = NULL;
    }

    __nvoc_init_OBJOS(pThis);
    status = __nvoc_ctor_OBJOS(pThis);
    if (status != NV_OK) goto __nvoc_objCreate_OBJOS_cleanup;

    *ppThis = pThis;
    return NV_OK;

__nvoc_objCreate_OBJOS_cleanup:
    // do not call destructors here since the constructor already called them
    portMemFree(pThis);
    return status;
}

NV_STATUS __nvoc_objCreateDynamic_OBJOS(OBJOS **ppThis, Dynamic *pParent, NvU32 createFlags, va_list args) {
    NV_STATUS status;

    status = __nvoc_objCreate_OBJOS(ppThis, pParent, createFlags);

    return status;
}
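All three generated constructors in this diff follow the same goto discipline: construct bases in order, jump to the `fail_` label matching the step that failed, and let the labels fall through in reverse so only what was already built gets torn down (compare `__nvoc_ctor_OsDescMemory`, which destroys `Memory` when `__nvoc_osdescConstruct` fails). A compilable sketch of that unwinding pattern, with hypothetical init/teardown helpers:

#include <stdio.h>

/* Hypothetical two-step object illustrating the ctor/fail-label
 * discipline used by the generated __nvoc_ctor_* functions. */
typedef struct { int a, b; } Pair;

static int  initA(Pair *p)     { p->a = 1; return 0; }
static int  initB(Pair *p)     { p->b = 2; return -1; /* force a failure */ }
static void teardownA(Pair *p) { p->a = 0; }

static int ctorPair(Pair *p) {
    int status;

    status = initA(p);
    if (status != 0) goto ctorPair_fail_A;
    status = initB(p);
    if (status != 0) goto ctorPair_fail_B;
    goto ctorPair_exit; // Success

ctorPair_fail_B:
    teardownA(p);   /* unwind only what was already constructed */
ctorPair_fail_A:
ctorPair_exit:
    return status;
}

int main(void) {
    Pair p = {0, 0};
    int status = ctorPair(&p);
    printf("status=%d a=%d\n", status, p.a);  /* prints: status=-1 a=0 */
    return 0;
}

The matching `__nvoc_objCreate_*` wrappers then free the raw allocation on failure without re-running destructors, which is why each carries the "do not call destructors here" comment.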
Some files were not shown because too many files have changed in this diff.