diff --git a/NVIDIA-REVIEWERS b/NVIDIA-REVIEWERS index 99b63bdb6..55dcaa70c 100644 --- a/NVIDIA-REVIEWERS +++ b/NVIDIA-REVIEWERS @@ -63,5 +63,3 @@ S: Supported F: drivers/gpu/nvgpu/* F: include/* F: ../../gpu-firmware-private/ - - diff --git a/drivers/gpu/nvgpu/Kconfig b/drivers/gpu/nvgpu/Kconfig index 4f90a35c7..7a9a99c6f 100644 --- a/drivers/gpu/nvgpu/Kconfig +++ b/drivers/gpu/nvgpu/Kconfig @@ -136,3 +136,10 @@ config GK20A_VIDMEM Enable support for using and allocating buffers in a distinct video memory aperture (in contrast to general system memory), available on GPUs that have their own banks. PCIe GPUs have this, for example. + +config TEGRA_19x_GPU + bool "Tegra 19x family GPU" + depends on GK20A && ARCH_TEGRA_19x_SOC + default y + help + Support for NVIDIA Tegra 19x family of GPU diff --git a/drivers/gpu/nvgpu/Makefile b/drivers/gpu/nvgpu/Makefile index f3338dc2b..9a7a3d071 100644 --- a/drivers/gpu/nvgpu/Makefile +++ b/drivers/gpu/nvgpu/Makefile @@ -258,3 +258,51 @@ nvgpu-$(CONFIG_TEGRA_GR_VIRTUALIZATION) += \ vgpu/gp10b/vgpu_gr_gp10b.o \ vgpu/gp10b/vgpu_mm_gp10b.o endif + +ifeq ($(CONFIG_ARCH_TEGRA_19x_SOC),y) +nvgpu-y += \ + common/mm/gmmu_t19x.o \ + common/linux/ioctl_tsg_t19x.o \ + common/linux/ioctl_ctrl_t19x.o \ + common/linux/io_t19x.o \ + common/linux/module_t19x.o \ + common/linux/pci_t19x.o \ + gv11b/gv11b.o \ + gv11b/css_gr_gv11b.o \ + gv11b/dbg_gpu_gv11b.o \ + gv11b/mc_gv11b.o \ + gv11b/ltc_gv11b.o \ + gv11b/hal_gv11b.o \ + gv11b/gv11b_gating_reglist.o \ + gv11b/gr_gv11b.o \ + gv11b/fb_gv11b.o \ + gv11b/fifo_gv11b.o \ + gv11b/mm_gv11b.o \ + gv11b/ce_gv11b.o \ + gv11b/gr_ctx_gv11b.o \ + gv11b/pmu_gv11b.o \ + gv11b/acr_gv11b.o \ + gv11b/subctx_gv11b.o \ + gv11b/regops_gv11b.o \ + gv11b/therm_gv11b.o \ + gv100/mm_gv100.o \ + gv100/gr_ctx_gv100.o \ + gv100/fb_gv100.o \ + gv100/bios_gv100.o \ + gv100/fifo_gv100.o \ + gv100/gr_gv100.o \ + gv100/regops_gv100.o \ + gv100/hal_gv100.o + +nvgpu-$(CONFIG_TEGRA_GK20A) += gv11b/platform_gv11b_tegra.o 
+nvgpu-$(CONFIG_TEGRA_GK20A_NVHOST) += common/linux/nvhost_t19x.o + +nvgpu-$(CONFIG_TEGRA_GR_VIRTUALIZATION) += \ + vgpu/gv11b/platform_gv11b_vgpu_tegra.o \ + vgpu/gv11b/vgpu_gv11b.o \ + vgpu/gv11b/vgpu_hal_gv11b.o \ + vgpu/gv11b/vgpu_gr_gv11b.o \ + vgpu/gv11b/vgpu_fifo_gv11b.o \ + vgpu/gv11b/vgpu_subctx_gv11b.o \ + vgpu/gv11b/vgpu_tsg_gv11b.o +endif diff --git a/drivers/gpu/nvgpu/acr_t19x.h b/drivers/gpu/nvgpu/acr_t19x.h new file mode 100644 index 000000000..0693c6a15 --- /dev/null +++ b/drivers/gpu/nvgpu/acr_t19x.h @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _NVGPU_ACR_T19X_H_ +#define _NVGPU_ACR_T19X_H_ + +#define BIGGPU_FECS_UCODE_SIG "gv100/fecs_sig.bin" +#define BIGGPU_GPCCS_UCODE_SIG "gv100/gpccs_sig.bin" + +#endif diff --git a/drivers/gpu/nvgpu/channel_t19x.h b/drivers/gpu/nvgpu/channel_t19x.h new file mode 100644 index 000000000..d3cb71a1c --- /dev/null +++ b/drivers/gpu/nvgpu/channel_t19x.h @@ -0,0 +1,33 @@ +/* + * NVIDIA T19x Channel info + * + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NVGPU_CHANNEL_T19X_H_ +#define _NVGPU_CHANNEL_T19X_H_ + +struct channel_t19x { + u32 subctx_id; + u32 runqueue_sel; +}; + +#endif diff --git a/drivers/gpu/nvgpu/common/linux/io_t19x.c b/drivers/gpu/nvgpu/common/linux/io_t19x.c new file mode 100644 index 000000000..5c6b76ba3 --- /dev/null +++ b/drivers/gpu/nvgpu/common/linux/io_t19x.c @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. 
All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#include +#include + +#include "common/linux/os_linux.h" +#include "gk20a/gk20a.h" + +#include + +void gv11b_usermode_writel(struct gk20a *g, u32 r, u32 v) +{ + struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g); + void __iomem *reg = l->t19x.usermode_regs + (r - usermode_cfg0_r()); + + writel_relaxed(v, reg); + gk20a_dbg(gpu_dbg_reg, "usermode r=0x%x v=0x%x", r, v); +} diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_ctrl_t19x.c b/drivers/gpu/nvgpu/common/linux/ioctl_ctrl_t19x.c new file mode 100644 index 000000000..a04fb5c91 --- /dev/null +++ b/drivers/gpu/nvgpu/common/linux/ioctl_ctrl_t19x.c @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */ + +#include + +#include +#include +#include + +#include "ioctl_ctrl_t19x.h" +#include "common/linux/os_linux.h" +#include "gk20a/gk20a.h" + +u64 nvgpu_ctrl_ioctl_gpu_characteristics_flags_t19x(struct gk20a *g) +{ + u64 ioctl_flags = 0; + + if (nvgpu_is_enabled(g, NVGPU_SUPPORT_TSG_SUBCONTEXTS)) + ioctl_flags |= NVGPU_GPU_FLAGS_SUPPORT_TSG_SUBCONTEXTS; + + return ioctl_flags; +} + diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_ctrl_t19x.h b/drivers/gpu/nvgpu/common/linux/ioctl_ctrl_t19x.h new file mode 100644 index 000000000..641412232 --- /dev/null +++ b/drivers/gpu/nvgpu/common/linux/ioctl_ctrl_t19x.h @@ -0,0 +1,23 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef _NVGPU_IOCTL_CTRL_T19X +#define _NVGPU_IOCTL_CTRL_T19X + +#include + +struct gk20a; + +u64 nvgpu_ctrl_ioctl_gpu_characteristics_flags_t19x(struct gk20a *g); + +#endif diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_tsg_t19x.c b/drivers/gpu/nvgpu/common/linux/ioctl_tsg_t19x.c new file mode 100644 index 000000000..1c96db69e --- /dev/null +++ b/drivers/gpu/nvgpu/common/linux/ioctl_tsg_t19x.c @@ -0,0 +1,115 @@ +/* + * GV11B TSG IOCTL Handler + * + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#include +#include + +#include "gk20a/gk20a.h" + +#include "gv11b/fifo_gv11b.h" +#include "gv11b/subctx_gv11b.h" +#include "ioctl_tsg_t19x.h" +#include "common/linux/os_linux.h" + +static int gv11b_tsg_ioctl_bind_channel_ex(struct gk20a *g, + struct tsg_gk20a *tsg, struct nvgpu_tsg_bind_channel_ex_args *arg) +{ + struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g); + struct gk20a_sched_ctrl *sched = &l->sched_ctrl; + struct channel_gk20a *ch; + struct gr_gk20a *gr = &g->gr; + int err = 0; + + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_sched, "tsgid=%u", tsg->tsgid); + + nvgpu_mutex_acquire(&sched->control_lock); + if (sched->control_locked) { + err = -EPERM; + goto mutex_release; + } + err = gk20a_busy(g); + if (err) { + nvgpu_err(g, "failed to power on gpu"); + goto mutex_release; + } + + ch = gk20a_get_channel_from_file(arg->channel_fd); + if (!ch) { + err = -EINVAL; + goto idle; + } + + if (arg->tpc_pg_enabled && (!tsg->t19x.tpc_num_initialized)) { + if ((arg->num_active_tpcs > gr->max_tpc_count) || + !(arg->num_active_tpcs)) { + nvgpu_err(g, "Invalid num of active TPCs"); + err = -EINVAL; + goto ch_put; + } + tsg->t19x.tpc_num_initialized = true; + tsg->t19x.num_active_tpcs = arg->num_active_tpcs; + tsg->t19x.tpc_pg_enabled = true; + } else { + tsg->t19x.tpc_pg_enabled = false; + nvgpu_log(g, gpu_dbg_info, "dynamic TPC-PG not enabled"); + } + + if (arg->subcontext_id < g->fifo.t19x.max_subctx_count) { + ch->t19x.subctx_id = arg->subcontext_id; + } else { + err = -EINVAL; + goto ch_put; + } + + nvgpu_log(g, gpu_dbg_info, "channel id : %d : subctx: %d", + ch->chid, ch->t19x.subctx_id); + + /* Use runqueue selector 1 for all ASYNC ids */ + if (ch->t19x.subctx_id > CHANNEL_INFO_VEID0) + ch->t19x.runqueue_sel = 1; + + 
err = ch->g->ops.fifo.tsg_bind_channel(tsg, ch); +ch_put: + gk20a_channel_put(ch); +idle: + gk20a_idle(g); +mutex_release: + nvgpu_mutex_release(&sched->control_lock); + return err; +} + +int t19x_tsg_ioctl_handler(struct gk20a *g, struct tsg_gk20a *tsg, + unsigned int cmd, u8 *buf) +{ + int err = 0; + + nvgpu_log(g, gpu_dbg_fn, "t19x_tsg_ioctl_handler"); + + switch (cmd) { + case NVGPU_TSG_IOCTL_BIND_CHANNEL_EX: + { + err = gv11b_tsg_ioctl_bind_channel_ex(g, tsg, + (struct nvgpu_tsg_bind_channel_ex_args *)buf); + break; + } + + default: + nvgpu_err(g, "unrecognized tsg gpu ioctl cmd: 0x%x", + cmd); + err = -ENOTTY; + break; + } + return err; +} diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_tsg_t19x.h b/drivers/gpu/nvgpu/common/linux/ioctl_tsg_t19x.h new file mode 100644 index 000000000..3376ffced --- /dev/null +++ b/drivers/gpu/nvgpu/common/linux/ioctl_tsg_t19x.h @@ -0,0 +1,21 @@ +/* + * GV11B TSG IOCTL handler + * + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef _NVGPU_IOCTL_TSG_T19X +#define _NVGPU_IOCTL_TSG_T19X + +int t19x_tsg_ioctl_handler(struct gk20a *g, struct tsg_gk20a *tsg, + unsigned int cmd, u8 *arg); +#endif diff --git a/drivers/gpu/nvgpu/common/linux/module_t19x.c b/drivers/gpu/nvgpu/common/linux/module_t19x.c new file mode 100644 index 000000000..f0e3d4381 --- /dev/null +++ b/drivers/gpu/nvgpu/common/linux/module_t19x.c @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include + +#include + +#include "common/linux/os_linux.h" + +/* + * Locks out the driver from accessing GPU registers. This prevents access to + * these registers after the GPU has been clock or power gated. This should help + * find annoying bugs where register reads and writes are silently dropped + * after the GPU has been turned off. On older chips these reads and writes can + * also lock the entire CPU up. + */ +void t19x_lockout_registers(struct gk20a *g) +{ + struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g); + + l->t19x.usermode_regs = NULL; +} + +/* + * Undoes t19x_lockout_registers(). 
+ */ +void t19x_restore_registers(struct gk20a *g) +{ + struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g); + + l->t19x.usermode_regs = l->t19x.usermode_regs_saved; +} + +void t19x_remove_support(struct gk20a *g) +{ + struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g); + + if (l->t19x.usermode_regs) { + l->t19x.usermode_regs = NULL; + } +} + +void t19x_init_support(struct gk20a *g) +{ + struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g); + + l->t19x.usermode_regs = l->regs + usermode_cfg0_r(); + l->t19x.usermode_regs_saved = l->t19x.usermode_regs; +} diff --git a/drivers/gpu/nvgpu/common/linux/nvhost_t19x.c b/drivers/gpu/nvgpu/common/linux/nvhost_t19x.c new file mode 100644 index 000000000..21cf62ec8 --- /dev/null +++ b/drivers/gpu/nvgpu/common/linux/nvhost_t19x.c @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#include +#include + +#include + +#include "common/linux/nvhost_priv.h" + +int nvgpu_nvhost_syncpt_unit_interface_get_aperture( + struct nvgpu_nvhost_dev *nvhost_dev, + u64 *base, size_t *size) +{ + return nvhost_syncpt_unit_interface_get_aperture( + nvhost_dev->host1x_pdev, (phys_addr_t *)base, size); +} + +u32 nvgpu_nvhost_syncpt_unit_interface_get_byte_offset(u32 syncpt_id) +{ + return nvhost_syncpt_unit_interface_get_byte_offset(syncpt_id); +} diff --git a/drivers/gpu/nvgpu/common/linux/pci_t19x.c b/drivers/gpu/nvgpu/common/linux/pci_t19x.c new file mode 100644 index 000000000..54efd68e3 --- /dev/null +++ b/drivers/gpu/nvgpu/common/linux/pci_t19x.c @@ -0,0 +1,24 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#include + +#include + +#include "common/linux/os_linux.h" + +void t19x_nvgpu_pci_init_support(struct nvgpu_os_linux *l) +{ + l->t19x.usermode_regs = l->regs + usermode_cfg0_r(); + l->t19x.usermode_regs_saved = l->t19x.usermode_regs; +} diff --git a/drivers/gpu/nvgpu/common/mm/gmmu_t19x.c b/drivers/gpu/nvgpu/common/mm/gmmu_t19x.c new file mode 100644 index 000000000..9f9c188dc --- /dev/null +++ b/drivers/gpu/nvgpu/common/mm/gmmu_t19x.c @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include + +#include + +void nvgpu_gmmu_add_t19x_attrs(struct nvgpu_gmmu_attrs *attrs, u32 flags) +{ + attrs->t19x_attrs.l3_alloc = (bool)(flags & + NVGPU_AS_MAP_BUFFER_FLAGS_L3_ALLOC); +} diff --git a/drivers/gpu/nvgpu/ecc_t19x.h b/drivers/gpu/nvgpu/ecc_t19x.h new file mode 100644 index 000000000..5b571ce1f --- /dev/null +++ b/drivers/gpu/nvgpu/ecc_t19x.h @@ -0,0 +1,29 @@ +/* + * NVIDIA T19x ECC + * + * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _NVGPU_ECC_T19X_H_ +#define _NVGPU_ECC_T19X_H_ + +#include "gv11b/ecc_gv11b.h" + +#endif diff --git a/drivers/gpu/nvgpu/fifo_t19x.h b/drivers/gpu/nvgpu/fifo_t19x.h new file mode 100644 index 000000000..7274d1fef --- /dev/null +++ b/drivers/gpu/nvgpu/fifo_t19x.h @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _FIFO_T19X_H_ +#define _FIFO_T19X_H_ + +struct fifo_t19x { + u32 max_subctx_count; +}; + +#endif diff --git a/drivers/gpu/nvgpu/gr_t19x.h b/drivers/gpu/nvgpu/gr_t19x.h new file mode 100644 index 000000000..954472faf --- /dev/null +++ b/drivers/gpu/nvgpu/gr_t19x.h @@ -0,0 +1,29 @@ +/* + * NVIDIA T19x GR + * + * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef _NVGPU_GR_T19X_H_ +#define _NVGPU_GR_T19X_H_ + +#include "gv11b/gr_gv11b.h" + +#endif diff --git a/drivers/gpu/nvgpu/gv100/bios_gv100.c b/drivers/gpu/nvgpu/gv100/bios_gv100.c new file mode 100644 index 000000000..9ca05a11d --- /dev/null +++ b/drivers/gpu/nvgpu/gv100/bios_gv100.c @@ -0,0 +1,108 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include +#include +#include + +#include "gk20a/gk20a.h" +#include "gp106/bios_gp106.h" +#include "bios_gv100.h" + +#include +#include + +#define PMU_BOOT_TIMEOUT_DEFAULT 100 /* usec */ +#define PMU_BOOT_TIMEOUT_MAX 2000000 /* usec */ + +#define SCRATCH_PREOS_PROGRESS 6 +#define PREOS_PROGRESS_MASK(r) ((r >> 12) & 0xf) +#define PREOS_PROGRESS_NOT_STARTED 0 +#define PREOS_PROGRESS_STARTED 1 +#define PREOS_PROGRESS_EXIT 2 +#define PREOS_PROGRESS_EXIT_SECUREMODE 3 +#define PREOS_PROGRESS_ABORTED 6 + +#define SCRATCH_PMU_EXIT_AND_HALT 1 +#define PMU_EXIT_AND_HALT_SET(r, v) ((r & ~0x200UL) | v) +#define PMU_EXIT_AND_HALT_YES (0x1UL << 9) + +#define SCRATCH_PRE_OS_RELOAD 1 +#define PRE_OS_RELOAD_SET(r, v) ((r & ~0x100UL) | v) +#define PRE_OS_RELOAD_YES (0x1UL << 8) + + +void gv100_bios_preos_reload_check(struct gk20a *g) +{ + u32 progress = gk20a_readl(g, + bus_sw_scratch_r(SCRATCH_PREOS_PROGRESS)); + + if (PREOS_PROGRESS_MASK(progress) != PREOS_PROGRESS_NOT_STARTED) { + u32 reload = gk20a_readl(g, + bus_sw_scratch_r(SCRATCH_PRE_OS_RELOAD)); + + gk20a_writel(g, bus_sw_scratch_r(SCRATCH_PRE_OS_RELOAD), + PRE_OS_RELOAD_SET(reload, PRE_OS_RELOAD_YES)); + } +} + +int gv100_bios_preos_wait_for_halt(struct gk20a *g) +{ + int err = -EINVAL; + u32 progress; + u32 tmp; + int preos_completed; + struct nvgpu_timeout timeout; + + nvgpu_udelay(PMU_BOOT_TIMEOUT_DEFAULT); + + /* Check the progress */ + progress = gk20a_readl(g, bus_sw_scratch_r(SCRATCH_PREOS_PROGRESS)); + + if (PREOS_PROGRESS_MASK(progress) == PREOS_PROGRESS_STARTED) { + err = 0; + + /* Complete the handshake */ + tmp = gk20a_readl(g, + bus_sw_scratch_r(SCRATCH_PMU_EXIT_AND_HALT)); + + gk20a_writel(g, bus_sw_scratch_r(SCRATCH_PMU_EXIT_AND_HALT), + PMU_EXIT_AND_HALT_SET(tmp, PMU_EXIT_AND_HALT_YES)); + + nvgpu_timeout_init(g, &timeout, + PMU_BOOT_TIMEOUT_MAX / + PMU_BOOT_TIMEOUT_DEFAULT, + NVGPU_TIMER_RETRY_TIMER); + + do { + progress = gk20a_readl(g, + bus_sw_scratch_r(SCRATCH_PREOS_PROGRESS)); + preos_completed 
= pwr_falcon_cpuctl_halt_intr_v( + gk20a_readl(g, pwr_falcon_cpuctl_r())) && + (PREOS_PROGRESS_MASK(progress) == + PREOS_PROGRESS_EXIT); + nvgpu_udelay(PMU_BOOT_TIMEOUT_DEFAULT); + } while (!preos_completed && !nvgpu_timeout_expired(&timeout)); + } + + return err; +} diff --git a/drivers/gpu/nvgpu/gv100/bios_gv100.h b/drivers/gpu/nvgpu/gv100/bios_gv100.h new file mode 100644 index 000000000..c6433f570 --- /dev/null +++ b/drivers/gpu/nvgpu/gv100/bios_gv100.h @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef NVGPU_BIOS_GV100_H +#define NVGPU_BIOS_GV100_H + +struct gk20a; + +void gv100_bios_preos_reload_check(struct gk20a *g); +int gv100_bios_preos_wait_for_halt(struct gk20a *g); + +#endif diff --git a/drivers/gpu/nvgpu/gv100/fb_gv100.c b/drivers/gpu/nvgpu/gv100/fb_gv100.c new file mode 100644 index 000000000..0a2939bf1 --- /dev/null +++ b/drivers/gpu/nvgpu/gv100/fb_gv100.c @@ -0,0 +1,184 @@ +/* + * GV100 FB + * + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "gk20a/gk20a.h" +#include "gv100/fb_gv100.h" +#include "gm20b/acr_gm20b.h" + +#include +#include +#include + +#define HW_SCRUB_TIMEOUT_DEFAULT 100 /* usec */ +#define HW_SCRUB_TIMEOUT_MAX 2000000 /* usec */ +#define MEM_UNLOCK_TIMEOUT 3500 /* msec */ + +void gv100_fb_reset(struct gk20a *g) +{ + u32 val; + int retries = HW_SCRUB_TIMEOUT_MAX / HW_SCRUB_TIMEOUT_DEFAULT; + + nvgpu_info(g, "reset gv100 fb"); + + /* wait for memory to be accessible */ + do { + u32 w = gk20a_readl(g, fb_niso_scrub_status_r()); + if (fb_niso_scrub_status_flag_v(w)) { + nvgpu_info(g, "done"); + break; + } + nvgpu_udelay(HW_SCRUB_TIMEOUT_DEFAULT); + } while (--retries); + + val = gk20a_readl(g, fb_mmu_priv_level_mask_r()); + val &= ~fb_mmu_priv_level_mask_write_violation_m(); + gk20a_writel(g, fb_mmu_priv_level_mask_r(), val); +} + +int gv100_fb_memory_unlock(struct gk20a *g) +{ + struct nvgpu_firmware *mem_unlock_fw = NULL; + struct bin_hdr *hsbin_hdr = NULL; + struct acr_fw_header *fw_hdr = NULL; + u32 *mem_unlock_ucode = NULL; + u32 *mem_unlock_ucode_header = NULL; + u32 sec_imem_dest = 0; + u32 val = 0; + int err = 0; + + nvgpu_log_fn(g, " "); + + /* Check vpr enable status */ + val = gk20a_readl(g, fb_mmu_vpr_info_r()); + val &= ~fb_mmu_vpr_info_index_m(); + val |= fb_mmu_vpr_info_index_cya_lo_v(); + gk20a_writel(g, fb_mmu_vpr_info_r(), val); + val = gk20a_readl(g, fb_mmu_vpr_info_r()); + if (!(val & fb_mmu_vpr_info_cya_lo_in_use_m())) { + nvgpu_log_info(g, "mem unlock not required on this SKU, skipping"); + goto exit; + } + + /* get mem unlock ucode binary */ + mem_unlock_fw = nvgpu_request_firmware(g, "mem_unlock.bin", 0); + if (!mem_unlock_fw) { + nvgpu_err(g, "mem unlock ucode get fail"); + err = -ENOENT; + goto exit; + } + + /* Enable nvdec */ + g->ops.mc.enable(g, mc_enable_nvdec_enabled_f()); + + /* nvdec falcon reset */ + 
nvgpu_flcn_reset(&g->nvdec_flcn); + + hsbin_hdr = (struct bin_hdr *)mem_unlock_fw->data; + fw_hdr = (struct acr_fw_header *)(mem_unlock_fw->data + + hsbin_hdr->header_offset); + + mem_unlock_ucode_header = (u32 *)(mem_unlock_fw->data + + fw_hdr->hdr_offset); + mem_unlock_ucode = (u32 *)(mem_unlock_fw->data + + hsbin_hdr->data_offset); + + /* Patch Ucode singnatures */ + if (acr_ucode_patch_sig(g, mem_unlock_ucode, + (u32 *)(mem_unlock_fw->data + fw_hdr->sig_prod_offset), + (u32 *)(mem_unlock_fw->data + fw_hdr->sig_dbg_offset), + (u32 *)(mem_unlock_fw->data + fw_hdr->patch_loc), + (u32 *)(mem_unlock_fw->data + fw_hdr->patch_sig)) < 0) { + nvgpu_err(g, "mem unlock patch signatures fail"); + err = -EPERM; + goto exit; + } + + /* Clear interrupts */ + nvgpu_flcn_set_irq(&g->nvdec_flcn, false, 0x0, 0x0); + + /* Copy Non Secure IMEM code */ + nvgpu_flcn_copy_to_imem(&g->nvdec_flcn, 0, + (u8 *)&mem_unlock_ucode[ + mem_unlock_ucode_header[OS_CODE_OFFSET] >> 2], + mem_unlock_ucode_header[OS_CODE_SIZE], 0, false, + GET_IMEM_TAG(mem_unlock_ucode_header[OS_CODE_OFFSET])); + + /* Put secure code after non-secure block */ + sec_imem_dest = GET_NEXT_BLOCK(mem_unlock_ucode_header[OS_CODE_SIZE]); + + nvgpu_flcn_copy_to_imem(&g->nvdec_flcn, sec_imem_dest, + (u8 *)&mem_unlock_ucode[ + mem_unlock_ucode_header[APP_0_CODE_OFFSET] >> 2], + mem_unlock_ucode_header[APP_0_CODE_SIZE], 0, true, + GET_IMEM_TAG(mem_unlock_ucode_header[APP_0_CODE_OFFSET])); + + /* load DMEM: ensure that signatures are patched */ + nvgpu_flcn_copy_to_dmem(&g->nvdec_flcn, 0, (u8 *)&mem_unlock_ucode[ + mem_unlock_ucode_header[OS_DATA_OFFSET] >> 2], + mem_unlock_ucode_header[OS_DATA_SIZE], 0); + + nvgpu_log_info(g, "nvdec sctl reg %x\n", + gk20a_readl(g, g->nvdec_flcn.flcn_base + + falcon_falcon_sctl_r())); + + /* set BOOTVEC to start of non-secure code */ + nvgpu_flcn_bootstrap(&g->nvdec_flcn, 0); + + /* wait for complete & halt */ + nvgpu_flcn_wait_for_halt(&g->nvdec_flcn, MEM_UNLOCK_TIMEOUT); + + /* check mem 
unlock status */ + val = nvgpu_flcn_mailbox_read(&g->nvdec_flcn, 0); + if (val) { + nvgpu_err(g, "memory unlock failed, err %x", val); + err = -1; + goto exit; + } + + nvgpu_log_info(g, "nvdec sctl reg %x\n", + gk20a_readl(g, g->nvdec_flcn.flcn_base + + falcon_falcon_sctl_r())); + +exit: + if (mem_unlock_fw) + nvgpu_release_firmware(g, mem_unlock_fw); + + nvgpu_log_fn(g, "done, status - %d", err); + + return err; +} diff --git a/drivers/gpu/nvgpu/gv100/fb_gv100.h b/drivers/gpu/nvgpu/gv100/fb_gv100.h new file mode 100644 index 000000000..b6db262a4 --- /dev/null +++ b/drivers/gpu/nvgpu/gv100/fb_gv100.h @@ -0,0 +1,32 @@ +/* + * GV100 FB + * + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _NVGPU_GV100_FB +#define _NVGPU_GV100_FB + +struct gk20a; + +void gv100_fb_reset(struct gk20a *g); +int gv100_fb_memory_unlock(struct gk20a *g); +#endif diff --git a/drivers/gpu/nvgpu/gv100/fifo_gv100.c b/drivers/gpu/nvgpu/gv100/fifo_gv100.c new file mode 100644 index 000000000..79862f6b1 --- /dev/null +++ b/drivers/gpu/nvgpu/gv100/fifo_gv100.c @@ -0,0 +1,40 @@ +/* + * GV100 fifo + * + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "fifo_gv100.h" + +#include + +#define DEFAULT_FIFO_PREEMPT_TIMEOUT 0x3FFFFFUL + +u32 gv100_fifo_get_num_fifos(struct gk20a *g) +{ + return ccsr_channel__size_1_v(); +} + +u32 gv100_fifo_get_preempt_timeout(struct gk20a *g) +{ + return DEFAULT_FIFO_PREEMPT_TIMEOUT; +} + diff --git a/drivers/gpu/nvgpu/gv100/fifo_gv100.h b/drivers/gpu/nvgpu/gv100/fifo_gv100.h new file mode 100644 index 000000000..af6ad030d --- /dev/null +++ b/drivers/gpu/nvgpu/gv100/fifo_gv100.h @@ -0,0 +1,33 @@ +/* + * GV100 Fifo + * + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef FIFO_GV100_H +#define FIFO_GV100_H + +#include +struct gk20a; + +u32 gv100_fifo_get_num_fifos(struct gk20a *g); +u32 gv100_fifo_get_preempt_timeout(struct gk20a *g); +#endif diff --git a/drivers/gpu/nvgpu/gv100/gr_ctx_gv100.c b/drivers/gpu/nvgpu/gv100/gr_ctx_gv100.c new file mode 100644 index 000000000..8b50125e0 --- /dev/null +++ b/drivers/gpu/nvgpu/gv100/gr_ctx_gv100.c @@ -0,0 +1,47 @@ +/* + * GV100 Graphics Context + * + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "gk20a/gk20a.h" +#include "gr_ctx_gv100.h" + +int gr_gv100_get_netlist_name(struct gk20a *g, int index, char *name) +{ + u32 ver = g->params.gpu_arch + g->params.gpu_impl; + + switch (ver) { + case NVGPU_GPUID_GV100: + sprintf(name, "%s/%s", "gv100", + GV100_NETLIST_IMAGE_FW_NAME); + break; + default: + nvgpu_err(g, "no support for GPUID %x", ver); + } + + return 0; +} + +bool gr_gv100_is_firmware_defined(void) +{ + return true; +} diff --git a/drivers/gpu/nvgpu/gv100/gr_ctx_gv100.h b/drivers/gpu/nvgpu/gv100/gr_ctx_gv100.h new file mode 100644 index 000000000..2302d9883 --- /dev/null +++ b/drivers/gpu/nvgpu/gv100/gr_ctx_gv100.h @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#ifndef __GR_CTX_GV100_H__ +#define __GR_CTX_GV100_H__ + +#include "gk20a/gr_ctx_gk20a.h" +#include "nvgpu_gpuid_t19x.h" + +/* production netlist, one and only one from below */ +#define GV100_NETLIST_IMAGE_FW_NAME GK20A_NETLIST_IMAGE_D + +int gr_gv100_get_netlist_name(struct gk20a *g, int index, char *name); +bool gr_gv100_is_firmware_defined(void); + +#endif /*__GR_CTX_GV100_H__*/ diff --git a/drivers/gpu/nvgpu/gv100/gr_gv100.c b/drivers/gpu/nvgpu/gv100/gr_gv100.c new file mode 100644 index 000000000..430c7cd01 --- /dev/null +++ b/drivers/gpu/nvgpu/gv100/gr_gv100.c @@ -0,0 +1,349 @@ +/* + * GV100 GPU GR + * + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include +#include +#include + +#include "gk20a/gk20a.h" +#include "gk20a/gr_gk20a.h" + +#include "gv100/gr_gv100.h" +#include "gv11b/subctx_gv11b.h" + +#include +#include + +/* + * Estimate performance if the given logical TPC in the given logical GPC were + * removed. + */ +static int gr_gv100_scg_estimate_perf(struct gk20a *g, + unsigned long *gpc_tpc_mask, + u32 disable_gpc_id, u32 disable_tpc_id, + int *perf) +{ + struct gr_gk20a *gr = &g->gr; + int err = 0; + u32 scale_factor = 512UL; /* Use fx23.9 */ + u32 pix_scale = 1024*1024UL; /* Pix perf in [29:20] */ + u32 world_scale = 1024UL; /* World performance in [19:10] */ + u32 tpc_scale = 1; /* TPC balancing in [9:0] */ + u32 scg_num_pes = 0; + u32 min_scg_gpc_pix_perf = scale_factor; /* Init perf as maximum */ + u32 average_tpcs = 0; /* Average of # of TPCs per GPC */ + u32 deviation; /* absolute diff between TPC# and + * average_tpcs, averaged across GPCs + */ + u32 norm_tpc_deviation; /* deviation/max_tpc_per_gpc */ + u32 tpc_balance; + u32 scg_gpc_pix_perf; + u32 scg_world_perf; + u32 gpc_id; + u32 pes_id; + int diff; + bool is_tpc_removed_gpc = false; + bool is_tpc_removed_pes = false; + u32 max_tpc_gpc = 0; + u32 num_tpc_mask; + u32 *num_tpc_gpc = nvgpu_kzalloc(g, sizeof(u32) * + nvgpu_get_litter_value(g, GPU_LIT_NUM_GPCS)); + + if (!num_tpc_gpc) + return -ENOMEM; + + /* Calculate pix-perf-reduction-rate per GPC and find bottleneck TPC */ + for (gpc_id = 0; gpc_id < gr->gpc_count; gpc_id++) { + num_tpc_mask = gpc_tpc_mask[gpc_id]; + + if ((gpc_id == disable_gpc_id) && num_tpc_mask & + (0x1 << disable_tpc_id)) { + /* Safety check if a TPC is removed twice */ + if (is_tpc_removed_gpc) { + err = -EINVAL; + goto free_resources; + } + /* Remove logical TPC from set */ + num_tpc_mask &= ~(0x1 << disable_tpc_id); + is_tpc_removed_gpc = true; + } + + /* track balancing of tpcs across gpcs */ + num_tpc_gpc[gpc_id] = hweight32(num_tpc_mask); + average_tpcs += num_tpc_gpc[gpc_id]; + + /* save the maximum 
numer of gpcs */ + max_tpc_gpc = num_tpc_gpc[gpc_id] > max_tpc_gpc ? + num_tpc_gpc[gpc_id] : max_tpc_gpc; + + /* + * Calculate ratio between TPC count and post-FS and post-SCG + * + * ratio represents relative throughput of the GPC + */ + scg_gpc_pix_perf = scale_factor * num_tpc_gpc[gpc_id] / + gr->gpc_tpc_count[gpc_id]; + + if (min_scg_gpc_pix_perf > scg_gpc_pix_perf) + min_scg_gpc_pix_perf = scg_gpc_pix_perf; + + /* Calculate # of surviving PES */ + for (pes_id = 0; pes_id < gr->gpc_ppc_count[gpc_id]; pes_id++) { + /* Count the number of TPC on the set */ + num_tpc_mask = gr->pes_tpc_mask[pes_id][gpc_id] & + gpc_tpc_mask[gpc_id]; + + if ((gpc_id == disable_gpc_id) && (num_tpc_mask & + (0x1 << disable_tpc_id))) { + + if (is_tpc_removed_pes) { + err = -EINVAL; + goto free_resources; + } + num_tpc_mask &= ~(0x1 << disable_tpc_id); + is_tpc_removed_pes = true; + } + if (hweight32(num_tpc_mask)) + scg_num_pes++; + } + } + + if (!is_tpc_removed_gpc || !is_tpc_removed_pes) { + err = -EINVAL; + goto free_resources; + } + + if (max_tpc_gpc == 0) { + *perf = 0; + goto free_resources; + } + + /* Now calculate perf */ + scg_world_perf = (scale_factor * scg_num_pes) / gr->ppc_count; + deviation = 0; + average_tpcs = scale_factor * average_tpcs / gr->gpc_count; + for (gpc_id =0; gpc_id < gr->gpc_count; gpc_id++) { + diff = average_tpcs - scale_factor * num_tpc_gpc[gpc_id]; + if (diff < 0) + diff = -diff; + deviation += diff; + } + + deviation /= gr->gpc_count; + + norm_tpc_deviation = deviation / max_tpc_gpc; + + tpc_balance = scale_factor - norm_tpc_deviation; + + if ((tpc_balance > scale_factor) || + (scg_world_perf > scale_factor) || + (min_scg_gpc_pix_perf > scale_factor) || + (norm_tpc_deviation > scale_factor)) { + err = -EINVAL; + goto free_resources; + } + + *perf = (pix_scale * min_scg_gpc_pix_perf) + + (world_scale * scg_world_perf) + + (tpc_scale * tpc_balance); +free_resources: + nvgpu_kfree(g, num_tpc_gpc); + return err; +} + +void 
gr_gv100_bundle_cb_defaults(struct gk20a *g) +{ + struct gr_gk20a *gr = &g->gr; + + gr->bundle_cb_default_size = + gr_scc_bundle_cb_size_div_256b__prod_v(); + gr->min_gpm_fifo_depth = + gr_pd_ab_dist_cfg2_state_limit_min_gpm_fifo_depths_v(); + gr->bundle_cb_token_limit = + gr_pd_ab_dist_cfg2_token_limit_init_v(); +} + +void gr_gv100_cb_size_default(struct gk20a *g) +{ + struct gr_gk20a *gr = &g->gr; + + if (!gr->attrib_cb_default_size) + gr->attrib_cb_default_size = + gr_gpc0_ppc0_cbm_beta_cb_size_v_default_v(); + gr->alpha_cb_default_size = + gr_gpc0_ppc0_cbm_alpha_cb_size_v_default_v(); +} + +void gr_gv100_set_gpc_tpc_mask(struct gk20a *g, u32 gpc_index) +{ +} + +void gr_gv100_init_sm_id_table(struct gk20a *g) +{ + u32 gpc, tpc, sm, pes, gtpc; + u32 sm_id = 0; + u32 sm_per_tpc = nvgpu_get_litter_value(g, GPU_LIT_NUM_SM_PER_TPC); + u32 num_sm = sm_per_tpc * g->gr.tpc_count; + int perf, maxperf; + int err; + unsigned long *gpc_tpc_mask; + u32 *tpc_table, *gpc_table; + + gpc_table = nvgpu_kzalloc(g, g->gr.tpc_count * sizeof(u32)); + tpc_table = nvgpu_kzalloc(g, g->gr.tpc_count * sizeof(u32)); + gpc_tpc_mask = nvgpu_kzalloc(g, sizeof(unsigned long) * + nvgpu_get_litter_value(g, GPU_LIT_NUM_GPCS)); + + if (!gpc_table || !tpc_table || !gpc_tpc_mask) { + nvgpu_err(g, "Error allocating memory for sm tables"); + goto exit_build_table; + } + + for (gpc = 0; gpc < g->gr.gpc_count; gpc++) + for (pes = 0; pes < g->gr.gpc_ppc_count[gpc]; pes++) + gpc_tpc_mask[gpc] |= g->gr.pes_tpc_mask[pes][gpc]; + + for (gtpc = 0; gtpc < g->gr.tpc_count; gtpc++) { + maxperf = -1; + for (gpc = 0; gpc < g->gr.gpc_count; gpc++) { + for_each_set_bit(tpc, &gpc_tpc_mask[gpc], + g->gr.gpc_tpc_count[gpc]) { + perf = -1; + err = gr_gv100_scg_estimate_perf(g, + gpc_tpc_mask, gpc, tpc, &perf); + + if (err) { + nvgpu_err(g, + "Error while estimating perf"); + goto exit_build_table; + } + + if (perf >= maxperf) { + maxperf = perf; + gpc_table[gtpc] = gpc; + tpc_table[gtpc] = tpc; + } + } + } + 
gpc_tpc_mask[gpc_table[gtpc]] &= ~(0x1 << tpc_table[gtpc]); + } + + for (tpc = 0, sm_id = 0; sm_id < num_sm; tpc++, sm_id += sm_per_tpc) { + for (sm = 0; sm < sm_per_tpc; sm++) { + u32 index = sm_id + sm; + + g->gr.sm_to_cluster[index].gpc_index = gpc_table[tpc]; + g->gr.sm_to_cluster[index].tpc_index = tpc_table[tpc]; + g->gr.sm_to_cluster[index].sm_index = sm; + g->gr.sm_to_cluster[index].global_tpc_index = tpc; + nvgpu_log_info(g, + "gpc : %d tpc %d sm_index %d global_index: %d", + g->gr.sm_to_cluster[index].gpc_index, + g->gr.sm_to_cluster[index].tpc_index, + g->gr.sm_to_cluster[index].sm_index, + g->gr.sm_to_cluster[index].global_tpc_index); + + } + } + + g->gr.no_of_sm = num_sm; + nvgpu_log_info(g, " total number of sm = %d", g->gr.no_of_sm); +exit_build_table: + nvgpu_kfree(g, gpc_table); + nvgpu_kfree(g, tpc_table); + nvgpu_kfree(g, gpc_tpc_mask); +} + +void gr_gv100_load_tpc_mask(struct gk20a *g) +{ + u64 pes_tpc_mask = 0x0ULL; + u32 gpc, pes; + u32 num_tpc_per_gpc = nvgpu_get_litter_value(g, + GPU_LIT_NUM_TPC_PER_GPC); + + /* gv100 has 6 GPC and 7 TPC/GPC */ + for (gpc = 0; gpc < g->gr.gpc_count; gpc++) { + for (pes = 0; pes < g->gr.pe_count_per_gpc; pes++) { + pes_tpc_mask |= (u64) g->gr.pes_tpc_mask[pes][gpc] << + (num_tpc_per_gpc * gpc); + } + } + + nvgpu_log_info(g, "pes_tpc_mask: %016llx\n", pes_tpc_mask); + gk20a_writel(g, gr_fe_tpc_fs_r(0), u64_lo32(pes_tpc_mask)); + gk20a_writel(g, gr_fe_tpc_fs_r(1), u64_hi32(pes_tpc_mask)); +} + +u32 gr_gv100_get_patch_slots(struct gk20a *g) +{ + struct gr_gk20a *gr = &g->gr; + struct fifo_gk20a *f = &g->fifo; + u32 size = 0; + + /* + * CMD to update PE table + */ + size++; + + /* + * Update PE table contents + * for PE table, each patch buffer update writes 32 TPCs + */ + size += DIV_ROUND_UP(gr->tpc_count, 32); + + /* + * Update the PL table contents + * For PL table, each patch buffer update configures 4 TPCs + */ + size += DIV_ROUND_UP(gr->tpc_count, 4); + + /* + * We need this for all subcontexts + */ + size 
*= f->t19x.max_subctx_count; + + /* + * Add space for a partition mode change as well + * reserve two slots since DYNAMIC -> STATIC requires + * DYNAMIC -> NONE -> STATIC + */ + size += 2; + + /* + * Add current patch buffer size + */ + size += gr_gk20a_get_patch_slots(g); + + /* + * Align to 4K size + */ + size = ALIGN(size, PATCH_CTX_SLOTS_PER_PAGE); + + /* + * Increase the size to accommodate for additional TPC partition update + */ + size += 2 * PATCH_CTX_SLOTS_PER_PAGE; + + return size; +} diff --git a/drivers/gpu/nvgpu/gv100/gr_gv100.h b/drivers/gpu/nvgpu/gv100/gr_gv100.h new file mode 100644 index 000000000..612f76f99 --- /dev/null +++ b/drivers/gpu/nvgpu/gv100/gr_gv100.h @@ -0,0 +1,36 @@ +/* + * GV100 GPU GR + * + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _NVGPU_GR_GV100_H_ +#define _NVGPU_GR_GV100_H_ + +void gr_gv100_bundle_cb_defaults(struct gk20a *g); +void gr_gv100_cb_size_default(struct gk20a *g); +void gr_gv100_set_gpc_tpc_mask(struct gk20a *g, u32 gpc_index); +void gr_gv100_init_sm_id_table(struct gk20a *g); +void gr_gv100_program_sm_id_numbering(struct gk20a *g, + u32 gpc, u32 tpc, u32 smid); +int gr_gv100_load_smid_config(struct gk20a *g); +u32 gr_gv100_get_patch_slots(struct gk20a *g); +#endif diff --git a/drivers/gpu/nvgpu/gv100/gv100.h b/drivers/gpu/nvgpu/gv100/gv100.h new file mode 100644 index 000000000..7cc1f77ba --- /dev/null +++ b/drivers/gpu/nvgpu/gv100/gv100.h @@ -0,0 +1,32 @@ +/* + * GV100 Graphics + * + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef GV100_H +#define GV100_H + +#include "gk20a/gk20a.h" + +int gv100_init_gpu_characteristics(struct gk20a *g); + +#endif /* GV11B_H */ diff --git a/drivers/gpu/nvgpu/gv100/hal_gv100.c b/drivers/gpu/nvgpu/gv100/hal_gv100.c new file mode 100644 index 000000000..4044c4b50 --- /dev/null +++ b/drivers/gpu/nvgpu/gv100/hal_gv100.c @@ -0,0 +1,769 @@ +/* + * GV100 Tegra HAL interface + * + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include +#include + +#include +#include + +#include "gk20a/gk20a.h" +#include "gk20a/fifo_gk20a.h" +#include "gk20a/fecs_trace_gk20a.h" +#include "gk20a/css_gr_gk20a.h" +#include "gk20a/mc_gk20a.h" +#include "gk20a/dbg_gpu_gk20a.h" +#include "gk20a/bus_gk20a.h" +#include "gk20a/pramin_gk20a.h" +#include "gk20a/flcn_gk20a.h" +#include "gk20a/regops_gk20a.h" +#include "gk20a/fb_gk20a.h" +#include "gk20a/mm_gk20a.h" +#include "gk20a/pmu_gk20a.h" +#include "gk20a/gr_gk20a.h" + +#include "gm20b/ltc_gm20b.h" +#include "gm20b/gr_gm20b.h" +#include "gm20b/fifo_gm20b.h" +#include "gm20b/fb_gm20b.h" +#include "gm20b/mm_gm20b.h" +#include "gm20b/pmu_gm20b.h" +#include "gm20b/acr_gm20b.h" + +#include "gp10b/fb_gp10b.h" +#include "gp10b/gr_gp10b.h" + +#include "gp106/clk_gp106.h" +#include "gp106/clk_arb_gp106.h" +#include "gp106/pmu_gp106.h" +#include "gp106/acr_gp106.h" +#include "gp106/sec2_gp106.h" +#include "gp106/bios_gp106.h" +#include "gv100/bios_gv100.h" +#include "gp106/therm_gp106.h" +#include "gp106/xve_gp106.h" +#include "gp106/clk_gp106.h" +#include "gp106/flcn_gp106.h" +#include "gp10b/ltc_gp10b.h" +#include "gp10b/therm_gp10b.h" +#include "gp10b/mc_gp10b.h" +#include "gp10b/ce_gp10b.h" +#include "gp10b/priv_ring_gp10b.h" +#include "gp10b/fifo_gp10b.h" +#include "gp10b/fecs_trace_gp10b.h" +#include "gp10b/mm_gp10b.h" +#include "gp10b/pmu_gp10b.h" + +#include "gv11b/css_gr_gv11b.h" +#include "gv11b/dbg_gpu_gv11b.h" +#include "gv11b/hal_gv11b.h" +#include "gv100/gr_gv100.h" +#include "gv11b/mc_gv11b.h" +#include "gv11b/ltc_gv11b.h" +#include "gv11b/gv11b.h" +#include "gv11b/ce_gv11b.h" +#include "gv100/gr_ctx_gv100.h" +#include "gv11b/mm_gv11b.h" +#include "gv11b/pmu_gv11b.h" +#include "gv11b/fb_gv11b.h" +#include "gv100/mm_gv100.h" +#include "gv11b/pmu_gv11b.h" +#include "gv100/fb_gv100.h" +#include "gv100/fifo_gv100.h" +#include "gv11b/fifo_gv11b.h" +#include "gv11b/regops_gv11b.h" + +#include "gv11b/gv11b_gating_reglist.h" +#include 
"gv100/regops_gv100.h" +#include "gv11b/subctx_gv11b.h" + +#include "gv100.h" +#include "hal_gv100.h" +#include "gv100/fb_gv100.h" +#include "gv100/mm_gv100.h" + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +static int gv100_get_litter_value(struct gk20a *g, int value) +{ + int ret = EINVAL; + switch (value) { + case GPU_LIT_NUM_GPCS: + ret = proj_scal_litter_num_gpcs_v(); + break; + case GPU_LIT_NUM_PES_PER_GPC: + ret = proj_scal_litter_num_pes_per_gpc_v(); + break; + case GPU_LIT_NUM_ZCULL_BANKS: + ret = proj_scal_litter_num_zcull_banks_v(); + break; + case GPU_LIT_NUM_TPC_PER_GPC: + ret = proj_scal_litter_num_tpc_per_gpc_v(); + break; + case GPU_LIT_NUM_SM_PER_TPC: + ret = proj_scal_litter_num_sm_per_tpc_v(); + break; + case GPU_LIT_NUM_FBPS: + ret = proj_scal_litter_num_fbps_v(); + break; + case GPU_LIT_GPC_BASE: + ret = proj_gpc_base_v(); + break; + case GPU_LIT_GPC_STRIDE: + ret = proj_gpc_stride_v(); + break; + case GPU_LIT_GPC_SHARED_BASE: + ret = proj_gpc_shared_base_v(); + break; + case GPU_LIT_TPC_IN_GPC_BASE: + ret = proj_tpc_in_gpc_base_v(); + break; + case GPU_LIT_TPC_IN_GPC_STRIDE: + ret = proj_tpc_in_gpc_stride_v(); + break; + case GPU_LIT_TPC_IN_GPC_SHARED_BASE: + ret = proj_tpc_in_gpc_shared_base_v(); + break; + case GPU_LIT_PPC_IN_GPC_BASE: + ret = proj_ppc_in_gpc_base_v(); + break; + case GPU_LIT_PPC_IN_GPC_STRIDE: + ret = proj_ppc_in_gpc_stride_v(); + break; + case GPU_LIT_PPC_IN_GPC_SHARED_BASE: + ret = proj_ppc_in_gpc_shared_base_v(); + break; + case GPU_LIT_ROP_BASE: + ret = proj_rop_base_v(); + break; + case GPU_LIT_ROP_STRIDE: + ret = proj_rop_stride_v(); + break; + case GPU_LIT_ROP_SHARED_BASE: + ret = proj_rop_shared_base_v(); + break; + case GPU_LIT_HOST_NUM_ENGINES: + ret = proj_host_num_engines_v(); + break; + case GPU_LIT_HOST_NUM_PBDMA: + ret = proj_host_num_pbdma_v(); + break; + case GPU_LIT_LTC_STRIDE: + ret = proj_ltc_stride_v(); + break; + case 
GPU_LIT_LTS_STRIDE: + ret = proj_lts_stride_v(); + break; + case GPU_LIT_NUM_FBPAS: + ret = proj_scal_litter_num_fbpas_v(); + break; + case GPU_LIT_FBPA_SHARED_BASE: + ret = proj_fbpa_shared_base_v(); + break; + case GPU_LIT_FBPA_BASE: + ret = proj_fbpa_base_v(); + break; + case GPU_LIT_FBPA_STRIDE: + ret = proj_fbpa_stride_v(); + break; + case GPU_LIT_SM_PRI_STRIDE: + ret = proj_sm_stride_v(); + break; + case GPU_LIT_SMPC_PRI_BASE: + ret = proj_smpc_base_v(); + break; + case GPU_LIT_SMPC_PRI_SHARED_BASE: + ret = proj_smpc_shared_base_v(); + break; + case GPU_LIT_SMPC_PRI_UNIQUE_BASE: + ret = proj_smpc_unique_base_v(); + break; + case GPU_LIT_SMPC_PRI_STRIDE: + ret = proj_smpc_stride_v(); + break; + case GPU_LIT_TWOD_CLASS: + ret = FERMI_TWOD_A; + break; + case GPU_LIT_THREED_CLASS: + ret = VOLTA_A; + break; + case GPU_LIT_COMPUTE_CLASS: + ret = VOLTA_COMPUTE_A; + break; + case GPU_LIT_GPFIFO_CLASS: + ret = VOLTA_CHANNEL_GPFIFO_A; + break; + case GPU_LIT_I2M_CLASS: + ret = KEPLER_INLINE_TO_MEMORY_B; + break; + case GPU_LIT_DMA_COPY_CLASS: + ret = VOLTA_DMA_COPY_A; + break; + default: + break; + } + + return ret; +} + +int gv100_init_gpu_characteristics(struct gk20a *g) +{ + int err; + + err = gk20a_init_gpu_characteristics(g); + if (err) + return err; + + __nvgpu_set_enabled(g, NVGPU_SUPPORT_TSG_SUBCONTEXTS, true); + + return 0; +} + + + +static const struct gpu_ops gv100_ops = { + .bios = { + .init = gp106_bios_init, + .preos_wait_for_halt = gv100_bios_preos_wait_for_halt, + .preos_reload_check = gv100_bios_preos_reload_check, + }, + .ltc = { + .determine_L2_size_bytes = gp10b_determine_L2_size_bytes, + .set_zbc_s_entry = gv11b_ltc_set_zbc_stencil_entry, + .set_zbc_color_entry = gm20b_ltc_set_zbc_color_entry, + .set_zbc_depth_entry = gm20b_ltc_set_zbc_depth_entry, + .init_cbc = NULL, + .init_fs_state = gv11b_ltc_init_fs_state, + .init_comptags = gp10b_ltc_init_comptags, + .cbc_ctrl = gm20b_ltc_cbc_ctrl, + .isr = gv11b_ltc_isr, + .cbc_fix_config = NULL, + .flush = 
gm20b_flush_ltc, + .set_enabled = gp10b_ltc_set_enabled, + }, + .ce2 = { + .isr_stall = gv11b_ce_isr, + .isr_nonstall = gp10b_ce_nonstall_isr, + .get_num_pce = gv11b_ce_get_num_pce, + }, + .gr = { + .get_patch_slots = gr_gv100_get_patch_slots, + .init_gpc_mmu = gr_gv11b_init_gpc_mmu, + .bundle_cb_defaults = gr_gv100_bundle_cb_defaults, + .cb_size_default = gr_gv100_cb_size_default, + .calc_global_ctx_buffer_size = + gr_gv11b_calc_global_ctx_buffer_size, + .commit_global_attrib_cb = gr_gv11b_commit_global_attrib_cb, + .commit_global_bundle_cb = gr_gp10b_commit_global_bundle_cb, + .commit_global_cb_manager = gr_gp10b_commit_global_cb_manager, + .commit_global_pagepool = gr_gp10b_commit_global_pagepool, + .handle_sw_method = gr_gv11b_handle_sw_method, + .set_alpha_circular_buffer_size = + gr_gv11b_set_alpha_circular_buffer_size, + .set_circular_buffer_size = gr_gv11b_set_circular_buffer_size, + .enable_hww_exceptions = gr_gv11b_enable_hww_exceptions, + .is_valid_class = gr_gv11b_is_valid_class, + .is_valid_gfx_class = gr_gv11b_is_valid_gfx_class, + .is_valid_compute_class = gr_gv11b_is_valid_compute_class, + .get_sm_dsm_perf_regs = gv11b_gr_get_sm_dsm_perf_regs, + .get_sm_dsm_perf_ctrl_regs = gv11b_gr_get_sm_dsm_perf_ctrl_regs, + .init_fs_state = gr_gv11b_init_fs_state, + .set_hww_esr_report_mask = gv11b_gr_set_hww_esr_report_mask, + .falcon_load_ucode = gr_gm20b_load_ctxsw_ucode_segments, + .load_ctxsw_ucode = gr_gm20b_load_ctxsw_ucode, + .set_gpc_tpc_mask = gr_gv100_set_gpc_tpc_mask, + .get_gpc_tpc_mask = gr_gm20b_get_gpc_tpc_mask, + .free_channel_ctx = gk20a_free_channel_ctx, + .alloc_obj_ctx = gk20a_alloc_obj_ctx, + .bind_ctxsw_zcull = gr_gk20a_bind_ctxsw_zcull, + .get_zcull_info = gr_gk20a_get_zcull_info, + .is_tpc_addr = gr_gm20b_is_tpc_addr, + .get_tpc_num = gr_gm20b_get_tpc_num, + .detect_sm_arch = gr_gv11b_detect_sm_arch, + .add_zbc_color = gr_gp10b_add_zbc_color, + .add_zbc_depth = gr_gp10b_add_zbc_depth, + .zbc_set_table = gk20a_gr_zbc_set_table, + 
.zbc_query_table = gr_gk20a_query_zbc, + .pmu_save_zbc = gk20a_pmu_save_zbc, + .add_zbc = gr_gk20a_add_zbc, + .pagepool_default_size = gr_gv11b_pagepool_default_size, + .init_ctx_state = gr_gp10b_init_ctx_state, + .alloc_gr_ctx = gr_gp10b_alloc_gr_ctx, + .free_gr_ctx = gr_gp10b_free_gr_ctx, + .update_ctxsw_preemption_mode = + gr_gp10b_update_ctxsw_preemption_mode, + .dump_gr_regs = gr_gv11b_dump_gr_status_regs, + .update_pc_sampling = gr_gm20b_update_pc_sampling, + .get_fbp_en_mask = gr_gm20b_get_fbp_en_mask, + .get_max_ltc_per_fbp = gr_gm20b_get_max_ltc_per_fbp, + .get_max_lts_per_ltc = gr_gm20b_get_max_lts_per_ltc, + .get_rop_l2_en_mask = gr_gm20b_rop_l2_en_mask, + .get_max_fbps_count = gr_gm20b_get_max_fbps_count, + .init_sm_dsm_reg_info = gv11b_gr_init_sm_dsm_reg_info, + .wait_empty = gr_gv11b_wait_empty, + .init_cyclestats = gr_gm20b_init_cyclestats, + .set_sm_debug_mode = gv11b_gr_set_sm_debug_mode, + .enable_cde_in_fecs = gr_gm20b_enable_cde_in_fecs, + .bpt_reg_info = gv11b_gr_bpt_reg_info, + .get_access_map = gr_gv11b_get_access_map, + .handle_fecs_error = gr_gv11b_handle_fecs_error, + .handle_sm_exception = gr_gk20a_handle_sm_exception, + .handle_tex_exception = gr_gv11b_handle_tex_exception, + .enable_gpc_exceptions = gr_gv11b_enable_gpc_exceptions, + .enable_exceptions = gr_gv11b_enable_exceptions, + .get_lrf_tex_ltc_dram_override = get_ecc_override_val, + .update_smpc_ctxsw_mode = gr_gk20a_update_smpc_ctxsw_mode, + .update_hwpm_ctxsw_mode = gr_gk20a_update_hwpm_ctxsw_mode, + .record_sm_error_state = gv11b_gr_record_sm_error_state, + .update_sm_error_state = gv11b_gr_update_sm_error_state, + .clear_sm_error_state = gm20b_gr_clear_sm_error_state, + .suspend_contexts = gr_gp10b_suspend_contexts, + .resume_contexts = gr_gk20a_resume_contexts, + .get_preemption_mode_flags = gr_gp10b_get_preemption_mode_flags, + .init_sm_id_table = gr_gv100_init_sm_id_table, + .load_smid_config = gr_gv11b_load_smid_config, + .program_sm_id_numbering = 
gr_gv11b_program_sm_id_numbering, + .is_ltcs_ltss_addr = gr_gm20b_is_ltcs_ltss_addr, + .is_ltcn_ltss_addr = gr_gm20b_is_ltcn_ltss_addr, + .split_lts_broadcast_addr = gr_gm20b_split_lts_broadcast_addr, + .split_ltc_broadcast_addr = gr_gm20b_split_ltc_broadcast_addr, + .setup_rop_mapping = gr_gv11b_setup_rop_mapping, + .program_zcull_mapping = gr_gv11b_program_zcull_mapping, + .commit_global_timeslice = gr_gv11b_commit_global_timeslice, + .commit_inst = gr_gv11b_commit_inst, + .write_zcull_ptr = gr_gv11b_write_zcull_ptr, + .write_pm_ptr = gr_gv11b_write_pm_ptr, + .init_elcg_mode = gr_gv11b_init_elcg_mode, + .load_tpc_mask = gr_gv11b_load_tpc_mask, + .inval_icache = gr_gk20a_inval_icache, + .trigger_suspend = gv11b_gr_sm_trigger_suspend, + .wait_for_pause = gr_gk20a_wait_for_pause, + .resume_from_pause = gv11b_gr_resume_from_pause, + .clear_sm_errors = gr_gk20a_clear_sm_errors, + .tpc_enabled_exceptions = gr_gk20a_tpc_enabled_exceptions, + .get_esr_sm_sel = gv11b_gr_get_esr_sm_sel, + .sm_debugger_attached = gv11b_gr_sm_debugger_attached, + .suspend_single_sm = gv11b_gr_suspend_single_sm, + .suspend_all_sms = gv11b_gr_suspend_all_sms, + .resume_single_sm = gv11b_gr_resume_single_sm, + .resume_all_sms = gv11b_gr_resume_all_sms, + .get_sm_hww_warp_esr = gv11b_gr_get_sm_hww_warp_esr, + .get_sm_hww_global_esr = gv11b_gr_get_sm_hww_global_esr, + .get_sm_no_lock_down_hww_global_esr_mask = + gv11b_gr_get_sm_no_lock_down_hww_global_esr_mask, + .lock_down_sm = gv11b_gr_lock_down_sm, + .wait_for_sm_lock_down = gv11b_gr_wait_for_sm_lock_down, + .clear_sm_hww = gv11b_gr_clear_sm_hww, + .init_ovr_sm_dsm_perf = gv11b_gr_init_ovr_sm_dsm_perf, + .get_ovr_perf_regs = gv11b_gr_get_ovr_perf_regs, + .disable_rd_coalesce = gm20a_gr_disable_rd_coalesce, + .set_boosted_ctx = gr_gp10b_set_boosted_ctx, + .set_preemption_mode = gr_gp10b_set_preemption_mode, + .set_czf_bypass = NULL, + .pre_process_sm_exception = gr_gv11b_pre_process_sm_exception, + .set_preemption_buffer_va = 
gr_gv11b_set_preemption_buffer_va, + .init_preemption_state = NULL, + .update_boosted_ctx = gr_gp10b_update_boosted_ctx, + .set_bes_crop_debug3 = gr_gp10b_set_bes_crop_debug3, + .create_gr_sysfs = gr_gv11b_create_sysfs, + .set_ctxsw_preemption_mode = gr_gp10b_set_ctxsw_preemption_mode, + .is_etpc_addr = gv11b_gr_pri_is_etpc_addr, + .egpc_etpc_priv_addr_table = gv11b_gr_egpc_etpc_priv_addr_table, + .handle_tpc_mpc_exception = gr_gv11b_handle_tpc_mpc_exception, + .zbc_s_query_table = gr_gv11b_zbc_s_query_table, + .load_zbc_s_default_tbl = gr_gv11b_load_stencil_default_tbl, + .handle_gpc_gpcmmu_exception = + gr_gv11b_handle_gpc_gpcmmu_exception, + .add_zbc_type_s = gr_gv11b_add_zbc_type_s, + .get_egpc_base = gv11b_gr_get_egpc_base, + .get_egpc_etpc_num = gv11b_gr_get_egpc_etpc_num, + .handle_gpc_gpccs_exception = + gr_gv11b_handle_gpc_gpccs_exception, + .load_zbc_s_tbl = gr_gv11b_load_stencil_tbl, + .access_smpc_reg = gv11b_gr_access_smpc_reg, + .is_egpc_addr = gv11b_gr_pri_is_egpc_addr, + .add_zbc_s = gr_gv11b_add_zbc_stencil, + .handle_gcc_exception = gr_gv11b_handle_gcc_exception, + .init_sw_veid_bundle = gr_gv11b_init_sw_veid_bundle, + .handle_tpc_sm_ecc_exception = + gr_gv11b_handle_tpc_sm_ecc_exception, + .decode_egpc_addr = gv11b_gr_decode_egpc_addr, + }, + .fb = { + .reset = gv100_fb_reset, + .init_hw = gk20a_fb_init_hw, + .init_fs_state = NULL, + .set_mmu_page_size = gm20b_fb_set_mmu_page_size, + .set_use_full_comp_tag_line = + gm20b_fb_set_use_full_comp_tag_line, + .compression_page_size = gp10b_fb_compression_page_size, + .compressible_page_size = gp10b_fb_compressible_page_size, + .vpr_info_fetch = gm20b_fb_vpr_info_fetch, + .dump_vpr_wpr_info = gm20b_fb_dump_vpr_wpr_info, + .read_wpr_info = gm20b_fb_read_wpr_info, + .is_debug_mode_enabled = gm20b_fb_debug_mode_enabled, + .set_debug_mode = gm20b_fb_set_debug_mode, + .tlb_invalidate = gk20a_fb_tlb_invalidate, + .hub_isr = gv11b_fb_hub_isr, + .mem_unlock = gv100_fb_memory_unlock, + }, + .fifo = { + 
.get_preempt_timeout = gv100_fifo_get_preempt_timeout, + .init_fifo_setup_hw = gv11b_init_fifo_setup_hw, + .bind_channel = channel_gm20b_bind, + .unbind_channel = channel_gv11b_unbind, + .disable_channel = gk20a_fifo_disable_channel, + .enable_channel = gk20a_fifo_enable_channel, + .alloc_inst = gk20a_fifo_alloc_inst, + .free_inst = gk20a_fifo_free_inst, + .setup_ramfc = channel_gv11b_setup_ramfc, + .channel_set_timeslice = gk20a_fifo_set_timeslice, + .default_timeslice_us = gk20a_fifo_default_timeslice_us, + .setup_userd = gk20a_fifo_setup_userd, + .userd_gp_get = gv11b_userd_gp_get, + .userd_gp_put = gv11b_userd_gp_put, + .userd_pb_get = gv11b_userd_pb_get, + .pbdma_acquire_val = gk20a_fifo_pbdma_acquire_val, + .preempt_channel = gv11b_fifo_preempt_channel, + .preempt_tsg = gv11b_fifo_preempt_tsg, + .enable_tsg = gv11b_fifo_enable_tsg, + .disable_tsg = gk20a_disable_tsg, + .tsg_verify_channel_status = gk20a_fifo_tsg_unbind_channel_verify_status, + .tsg_verify_status_ctx_reload = gm20b_fifo_tsg_verify_status_ctx_reload, + .tsg_verify_status_faulted = gv11b_fifo_tsg_verify_status_faulted, + .update_runlist = gk20a_fifo_update_runlist, + .trigger_mmu_fault = NULL, + .get_mmu_fault_info = NULL, + .wait_engine_idle = gk20a_fifo_wait_engine_idle, + .get_num_fifos = gv100_fifo_get_num_fifos, + .get_pbdma_signature = gp10b_fifo_get_pbdma_signature, + .set_runlist_interleave = gk20a_fifo_set_runlist_interleave, + .tsg_set_timeslice = gk20a_fifo_tsg_set_timeslice, + .force_reset_ch = gk20a_fifo_force_reset_ch, + .engine_enum_from_type = gp10b_fifo_engine_enum_from_type, + .device_info_data_parse = gp10b_device_info_data_parse, + .eng_runlist_base_size = fifo_eng_runlist_base__size_1_v, + .init_engine_info = gk20a_fifo_init_engine_info, + .runlist_entry_size = ram_rl_entry_size_v, + .get_tsg_runlist_entry = gv11b_get_tsg_runlist_entry, + .get_ch_runlist_entry = gv11b_get_ch_runlist_entry, + .is_fault_engine_subid_gpc = gv11b_is_fault_engine_subid_gpc, + .dump_pbdma_status = 
gk20a_dump_pbdma_status, + .dump_eng_status = gv11b_dump_eng_status, + .dump_channel_status_ramfc = gv11b_dump_channel_status_ramfc, + .intr_0_error_mask = gv11b_fifo_intr_0_error_mask, + .is_preempt_pending = gv11b_fifo_is_preempt_pending, + .init_pbdma_intr_descs = gv11b_fifo_init_pbdma_intr_descs, + .reset_enable_hw = gk20a_init_fifo_reset_enable_hw, + .teardown_ch_tsg = gv11b_fifo_teardown_ch_tsg, + .handle_sched_error = gv11b_fifo_handle_sched_error, + .handle_pbdma_intr_0 = gv11b_fifo_handle_pbdma_intr_0, + .handle_pbdma_intr_1 = gv11b_fifo_handle_pbdma_intr_1, + .init_eng_method_buffers = gv11b_fifo_init_eng_method_buffers, + .deinit_eng_method_buffers = + gv11b_fifo_deinit_eng_method_buffers, + .tsg_bind_channel = gk20a_tsg_bind_channel, + .tsg_unbind_channel = gk20a_tsg_unbind_channel, +#ifdef CONFIG_TEGRA_GK20A_NVHOST + .alloc_syncpt_buf = gv11b_fifo_alloc_syncpt_buf, + .free_syncpt_buf = gv11b_fifo_free_syncpt_buf, + .add_syncpt_wait_cmd = gv11b_fifo_add_syncpt_wait_cmd, + .get_syncpt_wait_cmd_size = gv11b_fifo_get_syncpt_wait_cmd_size, + .add_syncpt_incr_cmd = gv11b_fifo_add_syncpt_incr_cmd, + .get_syncpt_incr_cmd_size = gv11b_fifo_get_syncpt_incr_cmd_size, +#endif + .resetup_ramfc = NULL, + .device_info_fault_id = top_device_info_data_fault_id_enum_v, + .free_channel_ctx_header = gv11b_free_subctx_header, + .preempt_ch_tsg = gv11b_fifo_preempt_ch_tsg, + .handle_ctxsw_timeout = gv11b_fifo_handle_ctxsw_timeout, + }, + .gr_ctx = { + .get_netlist_name = gr_gv100_get_netlist_name, + .is_fw_defined = gr_gv100_is_firmware_defined, + }, +#ifdef CONFIG_GK20A_CTXSW_TRACE + .fecs_trace = { + .alloc_user_buffer = NULL, + .free_user_buffer = NULL, + .mmap_user_buffer = NULL, + .init = NULL, + .deinit = NULL, + .enable = NULL, + .disable = NULL, + .is_enabled = NULL, + .reset = NULL, + .flush = NULL, + .poll = NULL, + .bind_channel = NULL, + .unbind_channel = NULL, + .max_entries = NULL, + }, +#endif /* CONFIG_GK20A_CTXSW_TRACE */ + .mm = { + .support_sparse = 
gm20b_mm_support_sparse, + .gmmu_map = gk20a_locked_gmmu_map, + .gmmu_unmap = gk20a_locked_gmmu_unmap, + .vm_bind_channel = gk20a_vm_bind_channel, + .fb_flush = gk20a_mm_fb_flush, + .l2_invalidate = gk20a_mm_l2_invalidate, + .l2_flush = gk20a_mm_l2_flush, + .cbc_clean = gk20a_mm_cbc_clean, + .set_big_page_size = gm20b_mm_set_big_page_size, + .get_big_page_sizes = gm20b_mm_get_big_page_sizes, + .get_default_big_page_size = gp10b_mm_get_default_big_page_size, + .gpu_phys_addr = gv11b_gpu_phys_addr, + .get_mmu_levels = gp10b_mm_get_mmu_levels, + .get_vidmem_size = gv100_mm_get_vidmem_size, + .init_pdb = gp10b_mm_init_pdb, + .init_mm_setup_hw = gv11b_init_mm_setup_hw, + .is_bar1_supported = gv11b_mm_is_bar1_supported, + .alloc_inst_block = gk20a_alloc_inst_block, + .init_inst_block = gv11b_init_inst_block, + .mmu_fault_pending = gv11b_mm_mmu_fault_pending, + .get_kind_invalid = gm20b_get_kind_invalid, + .get_kind_pitch = gm20b_get_kind_pitch, + .init_bar2_vm = gb10b_init_bar2_vm, + .init_bar2_mm_hw_setup = gv11b_init_bar2_mm_hw_setup, + .remove_bar2_vm = gv11b_mm_remove_bar2_vm, + .fault_info_mem_destroy = gv11b_mm_fault_info_mem_destroy, + .get_flush_retries = gv100_mm_get_flush_retries, + }, + .pramin = { + .enter = gk20a_pramin_enter, + .exit = gk20a_pramin_exit, + .data032_r = pram_data032_r, + }, + .pmu = { + .init_wpr_region = gm20b_pmu_init_acr, + .load_lsfalcon_ucode = gp106_load_falcon_ucode, + .is_lazy_bootstrap = gp106_is_lazy_bootstrap, + .is_priv_load = gp106_is_priv_load, + .prepare_ucode = gp106_prepare_ucode_blob, + .pmu_setup_hw_and_bootstrap = gp106_bootstrap_hs_flcn, + .get_wpr = gp106_wpr_info, + .alloc_blob_space = gp106_alloc_blob_space, + .pmu_populate_loader_cfg = gp106_pmu_populate_loader_cfg, + .flcn_populate_bl_dmem_desc = gp106_flcn_populate_bl_dmem_desc, + .falcon_wait_for_halt = sec2_wait_for_halt, + .falcon_clear_halt_interrupt_status = + sec2_clear_halt_interrupt_status, + .init_falcon_setup_hw = init_sec2_setup_hw1, + .pmu_queue_tail = 
gk20a_pmu_queue_tail, + .pmu_get_queue_head = pwr_pmu_queue_head_r, + .pmu_mutex_release = gk20a_pmu_mutex_release, + .is_pmu_supported = gp106_is_pmu_supported, + .pmu_pg_supported_engines_list = gp106_pmu_pg_engines_list, + .pmu_elpg_statistics = gp106_pmu_elpg_statistics, + .pmu_mutex_acquire = gk20a_pmu_mutex_acquire, + .pmu_is_lpwr_feature_supported = + gp106_pmu_is_lpwr_feature_supported, + .pmu_msgq_tail = gk20a_pmu_msgq_tail, + .pmu_pg_engines_feature_list = gp106_pmu_pg_feature_list, + .pmu_get_queue_head_size = pwr_pmu_queue_head__size_1_v, + .pmu_queue_head = gk20a_pmu_queue_head, + .pmu_pg_param_post_init = nvgpu_lpwr_post_init, + .pmu_get_queue_tail_size = pwr_pmu_queue_tail__size_1_v, + .pmu_pg_init_param = gp106_pg_param_init, + .reset_engine = gp106_pmu_engine_reset, + .write_dmatrfbase = gp10b_write_dmatrfbase, + .pmu_mutex_size = pwr_pmu_mutex__size_1_v, + .is_engine_in_reset = gp106_pmu_is_engine_in_reset, + .pmu_get_queue_tail = pwr_pmu_queue_tail_r, + }, + .clk = { + .init_clk_support = gp106_init_clk_support, + .get_crystal_clk_hz = gp106_crystal_clk_hz, + .measure_freq = gp106_clk_measure_freq, + .suspend_clk_support = gp106_suspend_clk_support, + }, + .clk_arb = { + .get_arbiter_clk_domains = gp106_get_arbiter_clk_domains, + .get_arbiter_clk_range = gp106_get_arbiter_clk_range, + .get_arbiter_clk_default = gp106_get_arbiter_clk_default, + .get_current_pstate = nvgpu_clk_arb_get_current_pstate, + }, + .regops = { + .get_global_whitelist_ranges = + gv100_get_global_whitelist_ranges, + .get_global_whitelist_ranges_count = + gv100_get_global_whitelist_ranges_count, + .get_context_whitelist_ranges = + gv100_get_context_whitelist_ranges, + .get_context_whitelist_ranges_count = + gv100_get_context_whitelist_ranges_count, + .get_runcontrol_whitelist = gv100_get_runcontrol_whitelist, + .get_runcontrol_whitelist_count = + gv100_get_runcontrol_whitelist_count, + .get_runcontrol_whitelist_ranges = + gv100_get_runcontrol_whitelist_ranges, + 
.get_runcontrol_whitelist_ranges_count = + gv100_get_runcontrol_whitelist_ranges_count, + .get_qctl_whitelist = gv100_get_qctl_whitelist, + .get_qctl_whitelist_count = gv100_get_qctl_whitelist_count, + .get_qctl_whitelist_ranges = gv100_get_qctl_whitelist_ranges, + .get_qctl_whitelist_ranges_count = + gv100_get_qctl_whitelist_ranges_count, + .apply_smpc_war = gv100_apply_smpc_war, + }, + .mc = { + .intr_enable = mc_gv11b_intr_enable, + .intr_unit_config = mc_gp10b_intr_unit_config, + .isr_stall = mc_gp10b_isr_stall, + .intr_stall = mc_gp10b_intr_stall, + .intr_stall_pause = mc_gp10b_intr_stall_pause, + .intr_stall_resume = mc_gp10b_intr_stall_resume, + .intr_nonstall = mc_gp10b_intr_nonstall, + .intr_nonstall_pause = mc_gp10b_intr_nonstall_pause, + .intr_nonstall_resume = mc_gp10b_intr_nonstall_resume, + .enable = gk20a_mc_enable, + .disable = gk20a_mc_disable, + .reset = gk20a_mc_reset, + .boot_0 = gk20a_mc_boot_0, + .is_intr1_pending = mc_gp10b_is_intr1_pending, + .is_intr_hub_pending = gv11b_mc_is_intr_hub_pending, + }, + .debug = { + .show_dump = gk20a_debug_show_dump, + }, + .dbg_session_ops = { + .exec_reg_ops = exec_regops_gk20a, + .dbg_set_powergate = dbg_set_powergate, + .check_and_set_global_reservation = + nvgpu_check_and_set_global_reservation, + .check_and_set_context_reservation = + nvgpu_check_and_set_context_reservation, + .release_profiler_reservation = + nvgpu_release_profiler_reservation, + .perfbuffer_enable = gv11b_perfbuf_enable_locked, + .perfbuffer_disable = gv11b_perfbuf_disable_locked, + }, + .bus = { + .init_hw = gk20a_bus_init_hw, + .isr = gk20a_bus_isr, + .read_ptimer = gk20a_read_ptimer, + .get_timestamps_zipper = nvgpu_get_timestamps_zipper, + .bar1_bind = NULL, + }, +#if defined(CONFIG_GK20A_CYCLE_STATS) + .css = { + .enable_snapshot = gv11b_css_hw_enable_snapshot, + .disable_snapshot = gv11b_css_hw_disable_snapshot, + .check_data_available = gv11b_css_hw_check_data_available, + .set_handled_snapshots = css_hw_set_handled_snapshots, 
+ .allocate_perfmon_ids = css_gr_allocate_perfmon_ids, + .release_perfmon_ids = css_gr_release_perfmon_ids, + }, +#endif + .xve = { + .get_speed = xve_get_speed_gp106, + .set_speed = xve_set_speed_gp106, + .available_speeds = xve_available_speeds_gp106, + .xve_readl = xve_xve_readl_gp106, + .xve_writel = xve_xve_writel_gp106, + .disable_aspm = xve_disable_aspm_gp106, + .reset_gpu = xve_reset_gpu_gp106, +#if defined(CONFIG_PCI_MSI) + .rearm_msi = xve_rearm_msi_gp106, +#endif + .enable_shadow_rom = xve_enable_shadow_rom_gp106, + .disable_shadow_rom = xve_disable_shadow_rom_gp106, + }, + .falcon = { + .falcon_hal_sw_init = gp106_falcon_hal_sw_init, + }, + .priv_ring = { + .isr = gp10b_priv_ring_isr, + }, + .chip_init_gpu_characteristics = gv100_init_gpu_characteristics, + .get_litter_value = gv100_get_litter_value, +}; + +int gv100_init_hal(struct gk20a *g) +{ + struct gpu_ops *gops = &g->ops; + + gops->bios = gv100_ops.bios; + gops->ltc = gv100_ops.ltc; + gops->ce2 = gv100_ops.ce2; + gops->gr = gv100_ops.gr; + gops->fb = gv100_ops.fb; + gops->clock_gating = gv100_ops.clock_gating; + gops->fifo = gv100_ops.fifo; + gops->gr_ctx = gv100_ops.gr_ctx; + gops->mm = gv100_ops.mm; +#ifdef CONFIG_GK20A_CTXSW_TRACE + gops->fecs_trace = gv100_ops.fecs_trace; +#endif + gops->pramin = gv100_ops.pramin; + gops->therm = gv100_ops.therm; + gops->pmu = gv100_ops.pmu; + gops->regops = gv100_ops.regops; + gops->mc = gv100_ops.mc; + gops->debug = gv100_ops.debug; + gops->dbg_session_ops = gv100_ops.dbg_session_ops; + gops->bus = gv100_ops.bus; +#if defined(CONFIG_GK20A_CYCLE_STATS) + gops->css = gv100_ops.css; +#endif + gops->xve = gv100_ops.xve; + gops->falcon = gv100_ops.falcon; + gops->priv_ring = gv100_ops.priv_ring; + + /* clocks */ + gops->clk.init_clk_support = gv100_ops.clk.init_clk_support; + gops->clk.get_crystal_clk_hz = gv100_ops.clk.get_crystal_clk_hz; + gops->clk.measure_freq = gv100_ops.clk.measure_freq; + gops->clk.suspend_clk_support = gv100_ops.clk.suspend_clk_support; 
+ + /* Lone functions */ + gops->chip_init_gpu_characteristics = + gv100_ops.chip_init_gpu_characteristics; + gops->get_litter_value = gv100_ops.get_litter_value; + + __nvgpu_set_enabled(g, NVGPU_GR_USE_DMA_FOR_FW_BOOTSTRAP, true); + __nvgpu_set_enabled(g, NVGPU_SEC_PRIVSECURITY, true); + __nvgpu_set_enabled(g, NVGPU_SEC_SECUREGPCCS, true); + __nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, false); + /* for now */ + __nvgpu_set_enabled(g, NVGPU_PMU_PSTATE, false); + + g->pmu_lsf_pmu_wpr_init_done = 0; + g->bootstrap_owner = LSF_FALCON_ID_SEC2; + + g->name = "gv10x"; + + return 0; +} diff --git a/drivers/gpu/nvgpu/gv100/hal_gv100.h b/drivers/gpu/nvgpu/gv100/hal_gv100.h new file mode 100644 index 000000000..7dcf1d77c --- /dev/null +++ b/drivers/gpu/nvgpu/gv100/hal_gv100.h @@ -0,0 +1,30 @@ +/* + * GV100 Tegra HAL interface + * + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _NVGPU_HAL_GV11B_H +#define _NVGPU_HAL_GV11B_H +struct gk20a; + +int gv100_init_hal(struct gk20a *gops); +#endif diff --git a/drivers/gpu/nvgpu/gv100/mm_gv100.c b/drivers/gpu/nvgpu/gv100/mm_gv100.c new file mode 100644 index 000000000..1b46faaea --- /dev/null +++ b/drivers/gpu/nvgpu/gv100/mm_gv100.c @@ -0,0 +1,55 @@ +/* + * GV100 memory management + * + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "gk20a/gk20a.h" +#include "gv100/mm_gv100.h" + +#include + +size_t gv100_mm_get_vidmem_size(struct gk20a *g) +{ + u32 range = gk20a_readl(g, fb_mmu_local_memory_range_r()); + u32 mag = fb_mmu_local_memory_range_lower_mag_v(range); + u32 scale = fb_mmu_local_memory_range_lower_scale_v(range); + u32 ecc = fb_mmu_local_memory_range_ecc_mode_v(range); + size_t bytes = ((size_t)mag << scale) * SZ_1M; + + if (ecc) + bytes = bytes / 16 * 15; + + return bytes; +} + +u32 gv100_mm_get_flush_retries(struct gk20a *g, enum nvgpu_flush_op op) +{ + switch (op) { + /* GV100 has a large FB so it needs larger timeouts */ + case NVGPU_FLUSH_FB: + return 2000; + case NVGPU_FLUSH_L2_FLUSH: + return 2000; + default: + return 200; /* Default retry timer */ + } +} diff --git a/drivers/gpu/nvgpu/gv100/mm_gv100.h b/drivers/gpu/nvgpu/gv100/mm_gv100.h new file mode 100644 index 000000000..ea8965036 --- /dev/null +++ b/drivers/gpu/nvgpu/gv100/mm_gv100.h @@ -0,0 +1,33 @@ +/* + * GV100 memory management + * + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef MM_GV100_H +#define MM_GV100_H + +struct gk20a; + +size_t gv100_mm_get_vidmem_size(struct gk20a *g); +u32 gv100_mm_get_flush_retries(struct gk20a *g, enum nvgpu_flush_op op); + +#endif diff --git a/drivers/gpu/nvgpu/gv100/regops_gv100.c b/drivers/gpu/nvgpu/gv100/regops_gv100.c new file mode 100644 index 000000000..00f054188 --- /dev/null +++ b/drivers/gpu/nvgpu/gv100/regops_gv100.c @@ -0,0 +1,463 @@ +/* + * Tegra GV100 GPU Driver Register Ops + * + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "gk20a/gk20a.h" +#include "gk20a/dbg_gpu_gk20a.h" +#include "gk20a/regops_gk20a.h" +#include "regops_gv100.h" + +static const struct regop_offset_range gv100_global_whitelist_ranges[] = { + { 0x000004f0, 1}, + { 0x00001a00, 3}, + { 0x00002800, 128}, + { 0x00009400, 1}, + { 0x00009410, 1}, + { 0x00009480, 1}, + { 0x00020200, 24}, + { 0x00021c00, 4}, + { 0x00021c14, 3}, + { 0x00021c24, 1}, + { 0x00021c2c, 69}, + { 0x00021d44, 1}, + { 0x00021d4c, 1}, + { 0x00021d54, 1}, + { 0x00021d5c, 1}, + { 0x00021d64, 2}, + { 0x00021d70, 16}, + { 0x00022430, 7}, + { 0x00022450, 1}, + { 0x0002245c, 2}, + { 0x00070000, 5}, + { 0x000884e0, 1}, + { 0x0008e00c, 1}, + { 0x00100c18, 3}, + { 0x00100c84, 1}, + { 0x00104038, 1}, + { 0x0010a0a8, 1}, + { 0x0010a4f0, 1}, + { 0x0010e490, 1}, + { 0x0013cc14, 1}, + { 0x00140028, 1}, + { 0x00140280, 1}, + { 0x001402a0, 1}, + { 0x00140350, 1}, + { 0x00140480, 1}, + { 0x001404a0, 1}, + { 0x00140550, 1}, + { 0x00142028, 1}, + { 0x00142280, 1}, + { 0x001422a0, 1}, + { 0x00142350, 1}, + { 0x00142480, 1}, + { 0x001424a0, 1}, + { 0x00142550, 1}, + { 0x0017e028, 1}, + { 0x0017e280, 1}, + { 0x0017e294, 1}, + { 0x0017e29c, 2}, + { 0x0017e2ac, 1}, + { 0x0017e350, 1}, + { 0x0017e39c, 1}, + { 0x0017e480, 1}, + { 0x0017e4a0, 1}, + { 0x0017e550, 1}, + { 0x00180040, 41}, + { 0x001800ec, 10}, + { 0x00180240, 41}, + { 0x001802ec, 10}, + { 0x00180440, 41}, + { 0x001804ec, 10}, + { 0x00180640, 41}, + { 0x001806ec, 10}, + { 0x00180840, 41}, + { 0x001808ec, 10}, + { 0x00180a40, 41}, + { 0x00180aec, 10}, + { 0x00180c40, 41}, + { 0x00180cec, 10}, + { 0x00180e40, 41}, + { 0x00180eec, 10}, + { 0x001a0040, 41}, + { 0x001a00ec, 10}, + { 0x001a0240, 41}, + { 0x001a02ec, 10}, + { 0x001a0440, 41}, + { 0x001a04ec, 10}, + { 0x001a0640, 41}, + { 0x001a06ec, 10}, + { 0x001a0840, 41}, + { 0x001a08ec, 10}, + { 0x001a0a40, 41}, + { 0x001a0aec, 10}, + { 0x001a0c40, 41}, + { 0x001a0cec, 10}, + { 0x001a0e40, 41}, + { 0x001a0eec, 10}, + { 0x001b0040, 41}, + { 0x001b00ec, 
10}, + { 0x001b0240, 41}, + { 0x001b02ec, 10}, + { 0x001b0440, 41}, + { 0x001b04ec, 10}, + { 0x001b0640, 41}, + { 0x001b06ec, 10}, + { 0x001b0840, 41}, + { 0x001b08ec, 10}, + { 0x001b0a40, 41}, + { 0x001b0aec, 10}, + { 0x001b0c40, 41}, + { 0x001b0cec, 10}, + { 0x001b0e40, 41}, + { 0x001b0eec, 10}, + { 0x001b4000, 1}, + { 0x001b4008, 1}, + { 0x001b4010, 3}, + { 0x001b4020, 3}, + { 0x001b4030, 3}, + { 0x001b4040, 3}, + { 0x001b4050, 3}, + { 0x001b4060, 4}, + { 0x001b4074, 7}, + { 0x001b4094, 3}, + { 0x001b40a4, 1}, + { 0x001b4100, 6}, + { 0x001b4128, 1}, + { 0x001b8000, 1}, + { 0x001b8008, 1}, + { 0x001b8010, 2}, + { 0x001bc000, 1}, + { 0x001bc008, 1}, + { 0x001bc010, 2}, + { 0x001be000, 1}, + { 0x001be008, 1}, + { 0x001be010, 2}, + { 0x00400500, 1}, + { 0x0040415c, 1}, + { 0x00404468, 1}, + { 0x00404498, 1}, + { 0x00405800, 1}, + { 0x00405840, 2}, + { 0x00405850, 1}, + { 0x00405908, 1}, + { 0x00405b40, 1}, + { 0x00405b50, 1}, + { 0x00406024, 5}, + { 0x00407010, 1}, + { 0x00407808, 1}, + { 0x0040803c, 1}, + { 0x00408804, 1}, + { 0x0040880c, 1}, + { 0x00408900, 2}, + { 0x00408910, 1}, + { 0x00408944, 1}, + { 0x00408984, 1}, + { 0x004090a8, 1}, + { 0x004098a0, 1}, + { 0x00409b00, 1}, + { 0x0041000c, 1}, + { 0x00410110, 1}, + { 0x00410184, 1}, + { 0x0041040c, 1}, + { 0x00410510, 1}, + { 0x00410584, 1}, + { 0x00418000, 1}, + { 0x00418008, 1}, + { 0x00418380, 2}, + { 0x00418400, 2}, + { 0x004184a0, 1}, + { 0x00418604, 1}, + { 0x00418680, 1}, + { 0x00418704, 1}, + { 0x00418714, 1}, + { 0x00418800, 1}, + { 0x0041881c, 1}, + { 0x00418830, 1}, + { 0x00418884, 1}, + { 0x004188b0, 1}, + { 0x004188c8, 3}, + { 0x004188fc, 1}, + { 0x00418b04, 1}, + { 0x00418c04, 1}, + { 0x00418c10, 8}, + { 0x00418c88, 1}, + { 0x00418d00, 1}, + { 0x00418e00, 1}, + { 0x00418e08, 1}, + { 0x00418e34, 1}, + { 0x00418e40, 4}, + { 0x00418e58, 16}, + { 0x00418f08, 1}, + { 0x00419000, 1}, + { 0x0041900c, 1}, + { 0x00419018, 1}, + { 0x00419854, 1}, + { 0x00419864, 1}, + { 0x00419a04, 2}, + { 0x00419a14, 1}, 
+ { 0x00419ab0, 1}, + { 0x00419ab8, 3}, + { 0x00419c0c, 1}, + { 0x00419c8c, 2}, + { 0x00419d00, 1}, + { 0x00419d08, 2}, + { 0x00419e00, 11}, + { 0x00419e34, 2}, + { 0x00419e44, 11}, + { 0x00419e74, 10}, + { 0x00419ea4, 1}, + { 0x00419eac, 2}, + { 0x00419ee8, 1}, + { 0x00419ef0, 28}, + { 0x00419f70, 1}, + { 0x00419f78, 2}, + { 0x00419f98, 2}, + { 0x0041a02c, 2}, + { 0x0041a0a8, 1}, + { 0x0041a8a0, 3}, + { 0x0041b014, 1}, + { 0x0041b0a0, 1}, + { 0x0041b0cc, 1}, + { 0x0041b1dc, 1}, + { 0x0041be0c, 3}, + { 0x0041bea0, 1}, + { 0x0041becc, 1}, + { 0x0041bfdc, 1}, + { 0x0041c054, 1}, + { 0x0041c2b0, 1}, + { 0x0041c2b8, 3}, + { 0x0041c40c, 1}, + { 0x0041c48c, 2}, + { 0x0041c500, 1}, + { 0x0041c508, 2}, + { 0x0041c600, 11}, + { 0x0041c634, 2}, + { 0x0041c644, 11}, + { 0x0041c674, 10}, + { 0x0041c6a4, 1}, + { 0x0041c6ac, 2}, + { 0x0041c6e8, 1}, + { 0x0041c6f0, 28}, + { 0x0041c770, 1}, + { 0x0041c778, 2}, + { 0x0041c798, 2}, + { 0x0041c854, 1}, + { 0x0041cab0, 1}, + { 0x0041cab8, 3}, + { 0x0041cc0c, 1}, + { 0x0041cc8c, 2}, + { 0x0041cd00, 1}, + { 0x0041cd08, 2}, + { 0x0041ce00, 11}, + { 0x0041ce34, 2}, + { 0x0041ce44, 11}, + { 0x0041ce74, 10}, + { 0x0041cea4, 1}, + { 0x0041ceac, 2}, + { 0x0041cee8, 1}, + { 0x0041cef0, 28}, + { 0x0041cf70, 1}, + { 0x0041cf78, 2}, + { 0x0041cf98, 2}, + { 0x00500384, 1}, + { 0x005004a0, 1}, + { 0x00500604, 1}, + { 0x00500680, 1}, + { 0x00500714, 1}, + { 0x0050081c, 1}, + { 0x00500884, 1}, + { 0x005008b0, 1}, + { 0x005008c8, 3}, + { 0x005008fc, 1}, + { 0x00500b04, 1}, + { 0x00500c04, 1}, + { 0x00500c10, 8}, + { 0x00500c88, 1}, + { 0x00500d00, 1}, + { 0x00500e08, 1}, + { 0x00500f08, 1}, + { 0x00501000, 1}, + { 0x0050100c, 1}, + { 0x00501018, 1}, + { 0x00501854, 1}, + { 0x00501ab0, 1}, + { 0x00501ab8, 3}, + { 0x00501c0c, 1}, + { 0x00501c8c, 2}, + { 0x00501d00, 1}, + { 0x00501d08, 2}, + { 0x00501e00, 11}, + { 0x00501e34, 2}, + { 0x00501e44, 11}, + { 0x00501e74, 10}, + { 0x00501ea4, 1}, + { 0x00501eac, 2}, + { 0x00501ee8, 1}, + { 0x00501ef0, 28}, + { 
0x00501f70, 1}, + { 0x00501f78, 2}, + { 0x00501f98, 2}, + { 0x0050202c, 2}, + { 0x005020a8, 1}, + { 0x005028a0, 3}, + { 0x00503014, 1}, + { 0x005030a0, 1}, + { 0x005030cc, 1}, + { 0x005031dc, 1}, + { 0x00503e14, 1}, + { 0x00503ea0, 1}, + { 0x00503ecc, 1}, + { 0x00503fdc, 1}, + { 0x00504054, 1}, + { 0x005042b0, 1}, + { 0x005042b8, 3}, + { 0x0050440c, 1}, + { 0x0050448c, 2}, + { 0x00504500, 1}, + { 0x00504508, 2}, + { 0x00504600, 11}, + { 0x00504634, 2}, + { 0x00504644, 11}, + { 0x00504674, 10}, + { 0x005046a4, 1}, + { 0x005046ac, 2}, + { 0x005046e8, 1}, + { 0x005046f0, 28}, + { 0x00504770, 1}, + { 0x00504778, 2}, + { 0x00504798, 2}, + { 0x00504854, 1}, + { 0x00504ab0, 1}, + { 0x00504ab8, 3}, + { 0x00504c0c, 1}, + { 0x00504c8c, 2}, + { 0x00504d00, 1}, + { 0x00504d08, 2}, + { 0x00504e00, 11}, + { 0x00504e34, 2}, + { 0x00504e44, 11}, + { 0x00504e74, 10}, + { 0x00504ea4, 1}, + { 0x00504eac, 2}, + { 0x00504ee8, 1}, + { 0x00504ef0, 28}, + { 0x00504f70, 1}, + { 0x00504f78, 2}, + { 0x00504f98, 2}, + { 0x00900100, 1}, + { 0x009a0100, 1},}; + + +static const u32 gv100_global_whitelist_ranges_count = + ARRAY_SIZE(gv100_global_whitelist_ranges); + +/* context */ + +/* runcontrol */ +static const u32 gv100_runcontrol_whitelist[] = { +}; +static const u32 gv100_runcontrol_whitelist_count = + ARRAY_SIZE(gv100_runcontrol_whitelist); + +static const struct regop_offset_range gv100_runcontrol_whitelist_ranges[] = { +}; +static const u32 gv100_runcontrol_whitelist_ranges_count = + ARRAY_SIZE(gv100_runcontrol_whitelist_ranges); + + +/* quad ctl */ +static const u32 gv100_qctl_whitelist[] = { +}; +static const u32 gv100_qctl_whitelist_count = + ARRAY_SIZE(gv100_qctl_whitelist); + +static const struct regop_offset_range gv100_qctl_whitelist_ranges[] = { +}; +static const u32 gv100_qctl_whitelist_ranges_count = + ARRAY_SIZE(gv100_qctl_whitelist_ranges); + +const struct regop_offset_range *gv100_get_global_whitelist_ranges(void) +{ + return gv100_global_whitelist_ranges; +} + +int 
gv100_get_global_whitelist_ranges_count(void) +{ + return gv100_global_whitelist_ranges_count; +} + +const struct regop_offset_range *gv100_get_context_whitelist_ranges(void) +{ + return gv100_global_whitelist_ranges; +} + +int gv100_get_context_whitelist_ranges_count(void) +{ + return gv100_global_whitelist_ranges_count; +} + +const u32 *gv100_get_runcontrol_whitelist(void) +{ + return gv100_runcontrol_whitelist; +} + +int gv100_get_runcontrol_whitelist_count(void) +{ + return gv100_runcontrol_whitelist_count; +} + +const struct regop_offset_range *gv100_get_runcontrol_whitelist_ranges(void) +{ + return gv100_runcontrol_whitelist_ranges; +} + +int gv100_get_runcontrol_whitelist_ranges_count(void) +{ + return gv100_runcontrol_whitelist_ranges_count; +} + +const u32 *gv100_get_qctl_whitelist(void) +{ + return gv100_qctl_whitelist; +} + +int gv100_get_qctl_whitelist_count(void) +{ + return gv100_qctl_whitelist_count; +} + +const struct regop_offset_range *gv100_get_qctl_whitelist_ranges(void) +{ + return gv100_qctl_whitelist_ranges; +} + +int gv100_get_qctl_whitelist_ranges_count(void) +{ + return gv100_qctl_whitelist_ranges_count; +} + +int gv100_apply_smpc_war(struct dbg_session_gk20a *dbg_s) +{ + /* Not needed on gv100 */ + return 0; +} diff --git a/drivers/gpu/nvgpu/gv100/regops_gv100.h b/drivers/gpu/nvgpu/gv100/regops_gv100.h new file mode 100644 index 000000000..06e5b8e1a --- /dev/null +++ b/drivers/gpu/nvgpu/gv100/regops_gv100.h @@ -0,0 +1,42 @@ +/* + * + * Tegra GV100 GPU Driver Register Ops + * + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#ifndef __REGOPS_GV100_H_ +#define __REGOPS_GV100_H_ + +const struct regop_offset_range *gv100_get_global_whitelist_ranges(void); +int gv100_get_global_whitelist_ranges_count(void); +const struct regop_offset_range *gv100_get_context_whitelist_ranges(void); +int gv100_get_context_whitelist_ranges_count(void); +const u32 *gv100_get_runcontrol_whitelist(void); +int gv100_get_runcontrol_whitelist_count(void); +const struct regop_offset_range *gv100_get_runcontrol_whitelist_ranges(void); +int gv100_get_runcontrol_whitelist_ranges_count(void); +const u32 *gv100_get_qctl_whitelist(void); +int gv100_get_qctl_whitelist_count(void); +const struct regop_offset_range *gv100_get_qctl_whitelist_ranges(void); +int gv100_get_qctl_whitelist_ranges_count(void); +int gv100_apply_smpc_war(struct dbg_session_gk20a *dbg_s); + +#endif /* __REGOPS_GV100_H_ */ diff --git a/drivers/gpu/nvgpu/gv11b/acr_gv11b.c b/drivers/gpu/nvgpu/gv11b/acr_gv11b.c new file mode 100644 index 000000000..b245dbc66 --- /dev/null +++ b/drivers/gpu/nvgpu/gv11b/acr_gv11b.c @@ -0,0 +1,294 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifdef CONFIG_DEBUG_FS +#include +#endif + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "gk20a/gk20a.h" +#include "acr_gv11b.h" +#include "pmu_gv11b.h" +#include "gk20a/pmu_gk20a.h" +#include "gm20b/mm_gm20b.h" +#include "gm20b/acr_gm20b.h" +#include "gp106/acr_gp106.h" + +#include + +/*Defines*/ +#define gv11b_dbg_pmu(fmt, arg...) \ + gk20a_dbg(gpu_dbg_pmu, fmt, ##arg) + +static void flcn64_set_dma(struct falc_u64 *dma_addr, u64 value) +{ + dma_addr->lo |= u64_lo32(value); + dma_addr->hi |= u64_hi32(value); +} +/*Externs*/ + +/*Forwards*/ + +/*Loads ACR bin to FB mem and bootstraps PMU with bootloader code + * start and end are addresses of ucode blob in non-WPR region*/ +int gv11b_bootstrap_hs_flcn(struct gk20a *g) +{ + struct mm_gk20a *mm = &g->mm; + struct vm_gk20a *vm = mm->pmu.vm; + int err = 0; + u64 *acr_dmem; + u32 img_size_in_bytes = 0; + u32 status, size, index; + u64 start; + struct acr_desc *acr = &g->acr; + struct nvgpu_firmware *acr_fw = acr->acr_fw; + struct flcn_bl_dmem_desc_v1 *bl_dmem_desc = &acr->bl_dmem_desc_v1; + u32 *acr_ucode_header_t210_load; + u32 *acr_ucode_data_t210_load; + + start = nvgpu_mem_get_addr(g, &acr->ucode_blob); + size = acr->ucode_blob.size; + + gv11b_dbg_pmu("acr ucode blob start %llx\n", start); + gv11b_dbg_pmu("acr ucode blob size %x\n", size); + + gv11b_dbg_pmu(""); + + if (!acr_fw) { + /*First time init case*/ + acr_fw = nvgpu_request_firmware(g, + GM20B_HSBIN_PMU_UCODE_IMAGE, 0); + if (!acr_fw) { + nvgpu_err(g, "pmu ucode get fail"); + return -ENOENT; + } + acr->acr_fw = acr_fw; + acr->hsbin_hdr = (struct bin_hdr *)acr_fw->data; + acr->fw_hdr = (struct 
acr_fw_header *)(acr_fw->data + + acr->hsbin_hdr->header_offset); + acr_ucode_data_t210_load = (u32 *)(acr_fw->data + + acr->hsbin_hdr->data_offset); + acr_ucode_header_t210_load = (u32 *)(acr_fw->data + + acr->fw_hdr->hdr_offset); + img_size_in_bytes = ALIGN((acr->hsbin_hdr->data_size), 256); + + gv11b_dbg_pmu("sig dbg offset %u\n", + acr->fw_hdr->sig_dbg_offset); + gv11b_dbg_pmu("sig dbg size %u\n", acr->fw_hdr->sig_dbg_size); + gv11b_dbg_pmu("sig prod offset %u\n", + acr->fw_hdr->sig_prod_offset); + gv11b_dbg_pmu("sig prod size %u\n", + acr->fw_hdr->sig_prod_size); + gv11b_dbg_pmu("patch loc %u\n", acr->fw_hdr->patch_loc); + gv11b_dbg_pmu("patch sig %u\n", acr->fw_hdr->patch_sig); + gv11b_dbg_pmu("header offset %u\n", acr->fw_hdr->hdr_offset); + gv11b_dbg_pmu("header size %u\n", acr->fw_hdr->hdr_size); + + /* Lets patch the signatures first.. */ + if (acr_ucode_patch_sig(g, acr_ucode_data_t210_load, + (u32 *)(acr_fw->data + + acr->fw_hdr->sig_prod_offset), + (u32 *)(acr_fw->data + + acr->fw_hdr->sig_dbg_offset), + (u32 *)(acr_fw->data + + acr->fw_hdr->patch_loc), + (u32 *)(acr_fw->data + + acr->fw_hdr->patch_sig)) < 0) { + nvgpu_err(g, "patch signatures fail"); + err = -1; + goto err_release_acr_fw; + } + err = nvgpu_dma_alloc_map_sys(vm, img_size_in_bytes, + &acr->acr_ucode); + if (err) { + err = -ENOMEM; + goto err_release_acr_fw; + } + + for (index = 0; index < 9; index++) + gv11b_dbg_pmu("acr_ucode_header_t210_load %u\n", + acr_ucode_header_t210_load[index]); + + acr_dmem = (u64 *) + &(((u8 *)acr_ucode_data_t210_load)[ + acr_ucode_header_t210_load[2]]); + acr->acr_dmem_desc_v1 = (struct flcn_acr_desc_v1 *)((u8 *)( + acr->acr_ucode.cpu_va) + acr_ucode_header_t210_load[2]); + ((struct flcn_acr_desc_v1 *)acr_dmem)->nonwpr_ucode_blob_start = + (start); + ((struct flcn_acr_desc_v1 *)acr_dmem)->nonwpr_ucode_blob_size = + size; + ((struct flcn_acr_desc_v1 *)acr_dmem)->regions.no_regions = 2; + ((struct flcn_acr_desc_v1 *)acr_dmem)->wpr_offset = 0; + + 
nvgpu_mem_wr_n(g, &acr->acr_ucode, 0, + acr_ucode_data_t210_load, img_size_in_bytes); + /* + * In order to execute this binary, we will be using + * a bootloader which will load this image into PMU IMEM/DMEM. + * Fill up the bootloader descriptor for PMU HAL to use.. + * TODO: Use standard descriptor which the generic bootloader is + * checked in. + */ + bl_dmem_desc->signature[0] = 0; + bl_dmem_desc->signature[1] = 0; + bl_dmem_desc->signature[2] = 0; + bl_dmem_desc->signature[3] = 0; + bl_dmem_desc->ctx_dma = GK20A_PMU_DMAIDX_VIRT; + flcn64_set_dma(&bl_dmem_desc->code_dma_base, + acr->acr_ucode.gpu_va); + bl_dmem_desc->non_sec_code_off = acr_ucode_header_t210_load[0]; + bl_dmem_desc->non_sec_code_size = acr_ucode_header_t210_load[1]; + bl_dmem_desc->sec_code_off = acr_ucode_header_t210_load[5]; + bl_dmem_desc->sec_code_size = acr_ucode_header_t210_load[6]; + bl_dmem_desc->code_entry_point = 0; /* Start at 0th offset */ + flcn64_set_dma(&bl_dmem_desc->data_dma_base, + acr->acr_ucode.gpu_va + + acr_ucode_header_t210_load[2]); + bl_dmem_desc->data_size = acr_ucode_header_t210_load[3]; + } else + acr->acr_dmem_desc_v1->nonwpr_ucode_blob_size = 0; + status = pmu_exec_gen_bl(g, bl_dmem_desc, 1); + if (status != 0) { + err = status; + goto err_free_ucode_map; + } + + return 0; +err_free_ucode_map: + nvgpu_dma_unmap_free(vm, &acr->acr_ucode); +err_release_acr_fw: + nvgpu_release_firmware(g, acr_fw); + acr->acr_fw = NULL; + + return err; +} + +static int bl_bootstrap(struct nvgpu_pmu *pmu, + struct flcn_bl_dmem_desc_v1 *pbl_desc, u32 bl_sz) +{ + struct gk20a *g = gk20a_from_pmu(pmu); + struct acr_desc *acr = &g->acr; + struct mm_gk20a *mm = &g->mm; + u32 virt_addr = 0; + struct hsflcn_bl_desc *pmu_bl_gm10x_desc = g->acr.pmu_hsbl_desc; + u32 dst; + + gk20a_dbg_fn(""); + + gk20a_writel(g, pwr_falcon_itfen_r(), + gk20a_readl(g, pwr_falcon_itfen_r()) | + pwr_falcon_itfen_ctxen_enable_f()); + gk20a_writel(g, pwr_pmu_new_instblk_r(), + pwr_pmu_new_instblk_ptr_f( + 
nvgpu_inst_block_addr(g, &mm->pmu.inst_block) >> 12) | + pwr_pmu_new_instblk_valid_f(1) | + pwr_pmu_new_instblk_target_sys_ncoh_f()); + + /*copy bootloader interface structure to dmem*/ + nvgpu_flcn_copy_to_dmem(pmu->flcn, 0, (u8 *)pbl_desc, + sizeof(struct flcn_bl_dmem_desc_v1), 0); + + /* copy bootloader to TOP of IMEM */ + dst = (pwr_falcon_hwcfg_imem_size_v( + gk20a_readl(g, pwr_falcon_hwcfg_r())) << 8) - bl_sz; + + nvgpu_flcn_copy_to_imem(pmu->flcn, dst, + (u8 *)(acr->hsbl_ucode.cpu_va), bl_sz, 0, 0, + pmu_bl_gm10x_desc->bl_start_tag); + + gv11b_dbg_pmu("Before starting falcon with BL\n"); + + virt_addr = pmu_bl_gm10x_desc->bl_start_tag << 8; + + nvgpu_flcn_bootstrap(pmu->flcn, virt_addr); + + return 0; +} + +int gv11b_init_pmu_setup_hw1(struct gk20a *g, + void *desc, u32 bl_sz) +{ + + struct nvgpu_pmu *pmu = &g->pmu; + int err; + + gk20a_dbg_fn(""); + + nvgpu_mutex_acquire(&pmu->isr_mutex); + nvgpu_flcn_reset(pmu->flcn); + pmu->isr_enabled = true; + nvgpu_mutex_release(&pmu->isr_mutex); + + /* setup apertures - virtual */ + gk20a_writel(g, pwr_fbif_transcfg_r(GK20A_PMU_DMAIDX_UCODE), + pwr_fbif_transcfg_mem_type_physical_f() | + pwr_fbif_transcfg_target_noncoherent_sysmem_f()); + gk20a_writel(g, pwr_fbif_transcfg_r(GK20A_PMU_DMAIDX_VIRT), + pwr_fbif_transcfg_mem_type_virtual_f()); + /* setup apertures - physical */ + gk20a_writel(g, pwr_fbif_transcfg_r(GK20A_PMU_DMAIDX_PHYS_VID), + pwr_fbif_transcfg_mem_type_physical_f() | + pwr_fbif_transcfg_target_noncoherent_sysmem_f()); + gk20a_writel(g, pwr_fbif_transcfg_r(GK20A_PMU_DMAIDX_PHYS_SYS_COH), + pwr_fbif_transcfg_mem_type_physical_f() | + pwr_fbif_transcfg_target_coherent_sysmem_f()); + gk20a_writel(g, pwr_fbif_transcfg_r(GK20A_PMU_DMAIDX_PHYS_SYS_NCOH), + pwr_fbif_transcfg_mem_type_physical_f() | + pwr_fbif_transcfg_target_noncoherent_sysmem_f()); + + /*Copying pmu cmdline args*/ + g->ops.pmu_ver.set_pmu_cmdline_args_cpu_freq(pmu, + g->ops.clk.get_rate(g, CTRL_CLK_DOMAIN_PWRCLK)); + 
g->ops.pmu_ver.set_pmu_cmdline_args_secure_mode(pmu, 1); + g->ops.pmu_ver.set_pmu_cmdline_args_trace_size( + pmu, GK20A_PMU_TRACE_BUFSIZE); + g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_base(pmu); + g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_idx( + pmu, GK20A_PMU_DMAIDX_VIRT); + nvgpu_flcn_copy_to_dmem(pmu->flcn, g->acr.pmu_args, + (u8 *)(g->ops.pmu_ver.get_pmu_cmdline_args_ptr(pmu)), + g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu), 0); + /*disable irqs for hs falcon booting as we will poll for halt*/ + nvgpu_mutex_acquire(&pmu->isr_mutex); + pmu_enable_irq(pmu, false); + pmu->isr_enabled = false; + nvgpu_mutex_release(&pmu->isr_mutex); + /*Clearing mailbox register used to reflect capabilities*/ + gk20a_writel(g, pwr_falcon_mailbox1_r(), 0); + err = bl_bootstrap(pmu, desc, bl_sz); + if (err) + return err; + return 0; +} diff --git a/drivers/gpu/nvgpu/gv11b/acr_gv11b.h b/drivers/gpu/nvgpu/gv11b/acr_gv11b.h new file mode 100644 index 000000000..72b3ec35a --- /dev/null +++ b/drivers/gpu/nvgpu/gv11b/acr_gv11b.h @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __ACR_GV11B_H_ +#define __ACR_GV11B_H_ + + +int gv11b_bootstrap_hs_flcn(struct gk20a *g); +int gv11b_init_pmu_setup_hw1(struct gk20a *g, + void *desc, u32 bl_sz); +#endif /*__ACR_GV11B_H_*/ diff --git a/drivers/gpu/nvgpu/gv11b/ce_gv11b.c b/drivers/gpu/nvgpu/gv11b/ce_gv11b.c new file mode 100644 index 000000000..86518ac7e --- /dev/null +++ b/drivers/gpu/nvgpu/gv11b/ce_gv11b.c @@ -0,0 +1,110 @@ +/* + * Volta GPU series Copy Engine. + * + * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
+ */ + +#include "nvgpu/log.h" +#include "nvgpu/bitops.h" + +#include "gk20a/gk20a.h" + +#include "gp10b/ce_gp10b.h" + +#include "ce_gv11b.h" + +#include +#include + +u32 gv11b_ce_get_num_pce(struct gk20a *g) +{ + /* register contains a bitmask indicating which physical copy + * engines are present (and not floorswept). + */ + u32 num_pce; + u32 ce_pce_map = gk20a_readl(g, ce_pce_map_r()); + + num_pce = hweight32(ce_pce_map); + nvgpu_log_info(g, "num PCE: %d", num_pce); + return num_pce; +} + +void gv11b_ce_isr(struct gk20a *g, u32 inst_id, u32 pri_base) +{ + u32 ce_intr = gk20a_readl(g, ce_intr_status_r(inst_id)); + u32 clear_intr = 0; + + nvgpu_log(g, gpu_dbg_intr, "ce isr 0x%08x 0x%08x", ce_intr, inst_id); + + /* An INVALID_CONFIG interrupt will be generated if a floorswept + * PCE is assigned to a valid LCE in the NV_CE_PCE2LCE_CONFIG + * registers. This is a fatal error and the LCE will have to be + * reset to get back to a working state. + */ + if (ce_intr & ce_intr_status_invalid_config_pending_f()) { + nvgpu_log(g, gpu_dbg_intr, + "ce: inst %d: invalid config", inst_id); + clear_intr |= ce_intr_status_invalid_config_reset_f(); + } + + /* A MTHD_BUFFER_FAULT interrupt will be triggered if any access + * to a method buffer during context load or save encounters a fault. + * This is a fatal interrupt and will require at least the LCE to be + * reset before operations can start again, if not the entire GPU. 
+ */ + if (ce_intr & ce_intr_status_mthd_buffer_fault_pending_f()) { + nvgpu_log(g, gpu_dbg_intr, + "ce: inst %d: mthd buffer fault", inst_id); + clear_intr |= ce_intr_status_mthd_buffer_fault_reset_f(); + } + + gk20a_writel(g, ce_intr_status_r(inst_id), clear_intr); + + gp10b_ce_isr(g, inst_id, pri_base); +} + +u32 gv11b_ce_get_num_lce(struct gk20a *g) +{ + u32 reg_val, num_lce; + + reg_val = gk20a_readl(g, top_num_ces_r()); + num_lce = top_num_ces_value_v(reg_val); + nvgpu_log_info(g, "num LCE: %d", num_lce); + + return num_lce; +} + +void gv11b_ce_mthd_buffer_fault_in_bar2_fault(struct gk20a *g) +{ + u32 reg_val, num_lce, lce, clear_intr; + + num_lce = gv11b_ce_get_num_lce(g); + + for (lce = 0; lce < num_lce; lce++) { + reg_val = gk20a_readl(g, ce_intr_status_r(lce)); + if (reg_val & ce_intr_status_mthd_buffer_fault_pending_f()) { + nvgpu_log(g, gpu_dbg_intr, + "ce: lce %d: mthd buffer fault", lce); + clear_intr = ce_intr_status_mthd_buffer_fault_reset_f(); + gk20a_writel(g, ce_intr_status_r(lce), clear_intr); + } + } +} diff --git a/drivers/gpu/nvgpu/gv11b/ce_gv11b.h b/drivers/gpu/nvgpu/gv11b/ce_gv11b.h new file mode 100644 index 000000000..a0c7e0b11 --- /dev/null +++ b/drivers/gpu/nvgpu/gv11b/ce_gv11b.h @@ -0,0 +1,35 @@ +/* + * + * Volta GPU series copy engine + * + * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef __CE_GV11B_H__ +#define __CE_GV11B_H__ + +struct gk20a; + +void gv11b_ce_mthd_buffer_fault_in_bar2_fault(struct gk20a *g); +u32 gv11b_ce_get_num_lce(struct gk20a *g); +u32 gv11b_ce_get_num_pce(struct gk20a *g); +void gv11b_ce_isr(struct gk20a *g, u32 inst_id, u32 pri_base); + +#endif /*__CE_GV11B_H__*/ diff --git a/drivers/gpu/nvgpu/gv11b/css_gr_gv11b.c b/drivers/gpu/nvgpu/gv11b/css_gr_gv11b.c new file mode 100644 index 000000000..2eb45a88a --- /dev/null +++ b/drivers/gpu/nvgpu/gv11b/css_gr_gv11b.c @@ -0,0 +1,206 @@ +/* + * GV11B Cycle stats snapshots support + * + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include +#include + +#include +#include +#include +#include +#include + +#include "gk20a/gk20a.h" +#include "gk20a/css_gr_gk20a.h" +#include "css_gr_gv11b.h" + +#include +#include + +#include +#include + + +/* reports whether the hw queue overflowed */ +static inline bool css_hw_get_overflow_status(struct gk20a *g) +{ + const u32 st = perf_pmasys_control_membuf_status_overflowed_f(); + return st == (gk20a_readl(g, perf_pmasys_control_r()) & st); +} + +/* returns how many pending snapshot entries are pending */ +static inline u32 css_hw_get_pending_snapshots(struct gk20a *g) +{ + return gk20a_readl(g, perf_pmasys_mem_bytes_r()) / + sizeof(struct gk20a_cs_snapshot_fifo_entry); +} + +/* informs hw how many snapshots have been processed (frees up fifo space) */ +static inline void gv11b_css_hw_set_handled_snapshots(struct gk20a *g, u32 done) +{ + if (done > 0) { + gk20a_writel(g, perf_pmasys_mem_bump_r(), + done * sizeof(struct gk20a_cs_snapshot_fifo_entry)); + } +} + +/* disable streaming to memory */ +static void gv11b_css_hw_reset_streaming(struct gk20a *g) +{ + u32 engine_status; + + /* reset the perfmon */ + g->ops.mc.reset(g, mc_enable_perfmon_enabled_f()); + + /* RBUFEMPTY must be set -- otherwise we'll pick up */ + /* snapshot that have been queued up from earlier */ + engine_status = gk20a_readl(g, perf_pmasys_enginestatus_r()); + + /* turn off writes */ + gk20a_writel(g, perf_pmasys_control_r(), + perf_pmasys_control_membuf_clear_status_doit_f()); + + /* pointing all pending snapshots as handled */ + gv11b_css_hw_set_handled_snapshots(g, css_hw_get_pending_snapshots(g)); +} + +int gv11b_css_hw_enable_snapshot(struct channel_gk20a *ch, + struct gk20a_cs_snapshot_client *cs_client) 
+{ + struct gk20a *g = ch->g; + struct gr_gk20a *gr = &g->gr; + struct gk20a_cs_snapshot *data = gr->cs_data; + u32 snapshot_size = cs_client->snapshot_size; + int ret; + + u32 virt_addr_lo; + u32 virt_addr_hi; + u32 inst_pa_page; + + if (data->hw_snapshot) + return 0; + + if (snapshot_size < CSS_MIN_HW_SNAPSHOT_SIZE) + snapshot_size = CSS_MIN_HW_SNAPSHOT_SIZE; + + ret = nvgpu_dma_alloc_map_sys(g->mm.pmu.vm, snapshot_size, + &data->hw_memdesc); + if (ret) + return ret; + + /* perf output buffer may not cross a 4GB boundary - with a separate */ + /* va smaller than that, it won't but check anyway */ + if (!data->hw_memdesc.cpu_va || + data->hw_memdesc.size < snapshot_size || + data->hw_memdesc.gpu_va + u64_lo32(snapshot_size) > SZ_4G) { + ret = -EFAULT; + goto failed_allocation; + } + + data->hw_snapshot = + (struct gk20a_cs_snapshot_fifo_entry *)data->hw_memdesc.cpu_va; + data->hw_end = data->hw_snapshot + + snapshot_size / sizeof(struct gk20a_cs_snapshot_fifo_entry); + data->hw_get = data->hw_snapshot; + memset(data->hw_snapshot, 0xff, snapshot_size); + + virt_addr_lo = u64_lo32(data->hw_memdesc.gpu_va); + virt_addr_hi = u64_hi32(data->hw_memdesc.gpu_va); + + gv11b_css_hw_reset_streaming(g); + + gk20a_writel(g, perf_pmasys_outbase_r(), virt_addr_lo); + gk20a_writel(g, perf_pmasys_outbaseupper_r(), + perf_pmasys_outbaseupper_ptr_f(virt_addr_hi)); + gk20a_writel(g, perf_pmasys_outsize_r(), snapshot_size); + + /* this field is aligned to 4K */ + inst_pa_page = nvgpu_inst_block_addr(g, &g->mm.hwpm.inst_block) >> 12; + + gk20a_writel(g, perf_pmasys_mem_block_r(), + perf_pmasys_mem_block_base_f(inst_pa_page) | + perf_pmasys_mem_block_valid_true_f() | + nvgpu_aperture_mask(g, &g->mm.hwpm.inst_block, + perf_pmasys_mem_block_target_sys_ncoh_f(), + perf_pmasys_mem_block_target_lfb_f())); + + + gk20a_dbg_info("cyclestats: buffer for hardware snapshots enabled\n"); + + return 0; + +failed_allocation: + if (data->hw_memdesc.size) { + nvgpu_dma_unmap_free(g->mm.pmu.vm, 
&data->hw_memdesc); + memset(&data->hw_memdesc, 0, sizeof(data->hw_memdesc)); + } + data->hw_snapshot = NULL; + + return ret; +} + +void gv11b_css_hw_disable_snapshot(struct gr_gk20a *gr) +{ + struct gk20a *g = gr->g; + struct gk20a_cs_snapshot *data = gr->cs_data; + + if (!data->hw_snapshot) + return; + + gv11b_css_hw_reset_streaming(g); + + gk20a_writel(g, perf_pmasys_outbase_r(), 0); + gk20a_writel(g, perf_pmasys_outbaseupper_r(), + perf_pmasys_outbaseupper_ptr_f(0)); + gk20a_writel(g, perf_pmasys_outsize_r(), 0); + + gk20a_writel(g, perf_pmasys_mem_block_r(), + perf_pmasys_mem_block_base_f(0) | + perf_pmasys_mem_block_valid_false_f() | + perf_pmasys_mem_block_target_f(0)); + + nvgpu_dma_unmap_free(g->mm.pmu.vm, &data->hw_memdesc); + memset(&data->hw_memdesc, 0, sizeof(data->hw_memdesc)); + data->hw_snapshot = NULL; + + gk20a_dbg_info("cyclestats: buffer for hardware snapshots disabled\n"); +} + +int gv11b_css_hw_check_data_available(struct channel_gk20a *ch, u32 *pending, + bool *hw_overflow) +{ + struct gk20a *g = ch->g; + struct gr_gk20a *gr = &g->gr; + struct gk20a_cs_snapshot *css = gr->cs_data; + + if (!css->hw_snapshot) + return -EINVAL; + + *pending = css_hw_get_pending_snapshots(g); + if (!*pending) + return 0; + + *hw_overflow = css_hw_get_overflow_status(g); + return 0; +} diff --git a/drivers/gpu/nvgpu/gv11b/css_gr_gv11b.h b/drivers/gpu/nvgpu/gv11b/css_gr_gv11b.h new file mode 100644 index 000000000..6b11a62e4 --- /dev/null +++ b/drivers/gpu/nvgpu/gv11b/css_gr_gv11b.h @@ -0,0 +1,34 @@ +/* + * GV11B Cycle stats snapshots support + * + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef CSS_GR_GV11B_H +#define CSS_GR_GV11B_H + +int gv11b_css_hw_enable_snapshot(struct channel_gk20a *ch, + struct gk20a_cs_snapshot_client *cs_client); +void gv11b_css_hw_disable_snapshot(struct gr_gk20a *gr); +int gv11b_css_hw_check_data_available(struct channel_gk20a *ch, u32 *pending, + bool *hw_overflow); + +#endif /* CSS_GR_GV11B_H */ diff --git a/drivers/gpu/nvgpu/gv11b/dbg_gpu_gv11b.c b/drivers/gpu/nvgpu/gv11b/dbg_gpu_gv11b.c new file mode 100644 index 000000000..a02c2ddd3 --- /dev/null +++ b/drivers/gpu/nvgpu/gv11b/dbg_gpu_gv11b.c @@ -0,0 +1,99 @@ +/* + * Tegra GV11B GPU Debugger/Profiler Driver + * + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include + +#include +#include "gk20a/gk20a.h" +#include + +int gv11b_perfbuf_enable_locked(struct gk20a *g, u64 offset, u32 size) +{ + struct mm_gk20a *mm = &g->mm; + u32 virt_addr_lo; + u32 virt_addr_hi; + u32 inst_pa_page; + int err; + + gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); + err = gk20a_busy(g); + if (err) { + nvgpu_err(g, "failed to poweron"); + return err; + } + + err = gk20a_alloc_inst_block(g, &mm->perfbuf.inst_block); + if (err) + return err; + + g->ops.mm.init_inst_block(&mm->perfbuf.inst_block, mm->perfbuf.vm, 0); + + virt_addr_lo = u64_lo32(offset); + virt_addr_hi = u64_hi32(offset); + + gk20a_writel(g, perf_pmasys_outbase_r(), virt_addr_lo); + gk20a_writel(g, perf_pmasys_outbaseupper_r(), + perf_pmasys_outbaseupper_ptr_f(virt_addr_hi)); + gk20a_writel(g, perf_pmasys_outsize_r(), size); + + /* this field is aligned to 4K */ + inst_pa_page = nvgpu_inst_block_addr(g, &mm->perfbuf.inst_block) >> 12; + + gk20a_writel(g, perf_pmasys_mem_block_r(), + perf_pmasys_mem_block_base_f(inst_pa_page) | + perf_pmasys_mem_block_valid_true_f() | + nvgpu_aperture_mask(g, &mm->perfbuf.inst_block, + perf_pmasys_mem_block_target_sys_ncoh_f(), + perf_pmasys_mem_block_target_lfb_f())); + + gk20a_idle(g); + return 0; +} + +/* must be called with dbg_sessions_lock held */ +int gv11b_perfbuf_disable_locked(struct gk20a *g) +{ + int err; + + gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, ""); + err = gk20a_busy(g); + if (err) { + nvgpu_err(g, "failed to poweron"); + return err; + } + + gk20a_writel(g, perf_pmasys_outbase_r(), 0); + gk20a_writel(g, perf_pmasys_outbaseupper_r(), + perf_pmasys_outbaseupper_ptr_f(0)); + gk20a_writel(g, perf_pmasys_outsize_r(), 0); + + gk20a_writel(g, perf_pmasys_mem_block_r(), + perf_pmasys_mem_block_base_f(0) | + perf_pmasys_mem_block_valid_false_f() | + perf_pmasys_mem_block_target_f(0)); + + gk20a_idle(g); + + return 0; +} diff --git a/drivers/gpu/nvgpu/gv11b/dbg_gpu_gv11b.h b/drivers/gpu/nvgpu/gv11b/dbg_gpu_gv11b.h new file mode 100644 
index 000000000..88771a490 --- /dev/null +++ b/drivers/gpu/nvgpu/gv11b/dbg_gpu_gv11b.h @@ -0,0 +1,28 @@ +/* + * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef DBG_GPU_GV11B_H +#define DBG_GPU_GV11B_H + +int gv11b_perfbuf_enable_locked(struct gk20a *g, u64 offset, u32 size); +int gv11b_perfbuf_disable_locked(struct gk20a *g); + +#endif /* DBG_GPU_GV11B_H */ diff --git a/drivers/gpu/nvgpu/gv11b/ecc_gv11b.h b/drivers/gpu/nvgpu/gv11b/ecc_gv11b.h new file mode 100644 index 000000000..94b25c029 --- /dev/null +++ b/drivers/gpu/nvgpu/gv11b/ecc_gv11b.h @@ -0,0 +1,64 @@ +/* + * GV11B GPU ECC + * + * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
 */

/*
 * NOTE(review): guard name _NVGPU_ECC_GV11B_H_ uses a leading underscore +
 * uppercase, which is reserved for the implementation (C11 7.1.3) -- worth
 * renaming in a follow-up.
 */
#ifndef _NVGPU_ECC_GV11B_H_
#define _NVGPU_ECC_GV11B_H_

/*
 * Per-unit ECC error counters for the GR engine on t19x (SM sub-units, the
 * GCC L1.5 cache, the FECS/GPCCS falcons and the GPC MMU L1 TLB).  Each
 * gk20a_ecc_stat carries the counter array for one error kind; "corrected"
 * errors were fixed by ECC, "uncorrected" were only detected.
 */
struct ecc_gr_t19x {
	struct gk20a_ecc_stat sm_l1_tag_corrected_err_count;
	struct gk20a_ecc_stat sm_l1_tag_uncorrected_err_count;
	struct gk20a_ecc_stat sm_cbu_corrected_err_count;
	struct gk20a_ecc_stat sm_cbu_uncorrected_err_count;
	struct gk20a_ecc_stat sm_l1_data_corrected_err_count;
	struct gk20a_ecc_stat sm_l1_data_uncorrected_err_count;
	struct gk20a_ecc_stat sm_icache_corrected_err_count;
	struct gk20a_ecc_stat sm_icache_uncorrected_err_count;
	struct gk20a_ecc_stat gcc_l15_corrected_err_count;
	struct gk20a_ecc_stat gcc_l15_uncorrected_err_count;
	struct gk20a_ecc_stat fecs_corrected_err_count;
	struct gk20a_ecc_stat fecs_uncorrected_err_count;
	struct gk20a_ecc_stat gpccs_corrected_err_count;
	struct gk20a_ecc_stat gpccs_uncorrected_err_count;
	struct gk20a_ecc_stat mmu_l1tlb_corrected_err_count;
	struct gk20a_ecc_stat mmu_l1tlb_uncorrected_err_count;
};

/* ECC counters for the L2 cache (per-LTC). */
struct ecc_ltc_t19x {
	struct gk20a_ecc_stat l2_cache_corrected_err_count;
	struct gk20a_ecc_stat l2_cache_uncorrected_err_count;
};

/* TODO: PMU and FB ECC features are still under embargo */
struct ecc_eng_t19x {
	/* FB: HUB MMU L2 TLB, hub TLB and PTE/PDE fill unit counters,
	 * updated by the fb_gv11b.c ECC interrupt handlers. */
	struct gk20a_ecc_stat mmu_l2tlb_corrected_err_count;
	struct gk20a_ecc_stat mmu_l2tlb_uncorrected_err_count;
	struct gk20a_ecc_stat mmu_hubtlb_corrected_err_count;
	struct gk20a_ecc_stat mmu_hubtlb_uncorrected_err_count;
	struct gk20a_ecc_stat mmu_fillunit_corrected_err_count;
	struct gk20a_ecc_stat mmu_fillunit_uncorrected_err_count;
	/* PMU */
};

#endif
diff --git a/drivers/gpu/nvgpu/gv11b/fb_gv11b.c b/drivers/gpu/nvgpu/gv11b/fb_gv11b.c
new file mode 100644
index 000000000..ec487bdfc
--- /dev/null
+++ b/drivers/gpu/nvgpu/gv11b/fb_gv11b.c
@@ -0,0 +1,1555 @@
/*
 * GV11B FB
 *
 * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include + +#include +#include +#include +#include +#include +#include + +#include "gk20a/gk20a.h" +#include "gk20a/mm_gk20a.h" + +#include "gp10b/fb_gp10b.h" + +#include "gv11b/fifo_gv11b.h" +#include "gv11b/fb_gv11b.h" +#include "gv11b/ce_gv11b.h" + +#include +#include +#include +#include + +static int gv11b_fb_fix_page_fault(struct gk20a *g, + struct mmu_fault_info *mmfault); + +static int gv11b_fb_mmu_invalidate_replay(struct gk20a *g, + u32 invalidate_replay_val); + +static void gv11b_init_nvlink_soc_credits(struct gk20a *g) +{ + if (nvgpu_is_bpmp_running(g) && (!nvgpu_platform_is_simulation(g))) { + nvgpu_info(g, "nvlink soc credits init done by bpmp"); + } else { + /* MSS_NVLINK_1_BASE */ + void __iomem *soc1 = ioremap(0x01f20010, 4096); + /* MSS_NVLINK_2_BASE */ + void __iomem *soc2 = ioremap(0x01f40010, 4096); + /* MSS_NVLINK_3_BASE */ + void __iomem *soc3 = ioremap(0x01f60010, 4096); + /* MSS_NVLINK_4_BASE */ + void __iomem *soc4 = ioremap(0x01f80010, 4096); + u32 val; + + nvgpu_info(g, "init nvlink soc credits"); + + val = readl_relaxed(soc1); + writel_relaxed(val, soc1); + val = readl_relaxed(soc1 + 4); + writel_relaxed(val, soc1 + 4); + + val = readl_relaxed(soc2); + writel_relaxed(val, soc2); + val = readl_relaxed(soc2 + 4); + writel_relaxed(val, soc2 + 4); + + val = readl_relaxed(soc3); + writel_relaxed(val, soc3); + val = readl_relaxed(soc3 + 4); + writel_relaxed(val, soc3 + 4); + + val = readl_relaxed(soc4); + writel_relaxed(val, soc4); + val = readl_relaxed(soc4 + 4); + writel_relaxed(val, soc4 + 4); + } +} + +void gv11b_fb_init_fs_state(struct gk20a *g) +{ + nvgpu_log(g, gpu_dbg_fn, "initialize gv11b fb"); + + nvgpu_log(g, gpu_dbg_info, "fbhub active ltcs %x", + gk20a_readl(g, fb_fbhub_num_active_ltcs_r())); + + nvgpu_log(g, gpu_dbg_info, "mmu active ltcs %u", + fb_mmu_num_active_ltcs_count_v( + gk20a_readl(g, fb_mmu_num_active_ltcs_r()))); +} + +void gv11b_fb_init_cbc(struct gk20a *g, struct gr_gk20a *gr) +{ + u32 max_size = 
gr->max_comptag_mem; + /* one tag line covers 64KB */ + u32 max_comptag_lines = max_size << 4; + u32 compbit_base_post_divide; + u64 compbit_base_post_multiply64; + u64 compbit_store_iova; + u64 compbit_base_post_divide64; + + if (nvgpu_is_enabled(g, NVGPU_IS_FMODEL)) + compbit_store_iova = nvgpu_mem_get_phys_addr(g, + &gr->compbit_store.mem); + else + compbit_store_iova = nvgpu_mem_get_addr(g, + &gr->compbit_store.mem); + + compbit_base_post_divide64 = compbit_store_iova >> + fb_mmu_cbc_base_address_alignment_shift_v(); + + do_div(compbit_base_post_divide64, g->ltc_count); + compbit_base_post_divide = u64_lo32(compbit_base_post_divide64); + + compbit_base_post_multiply64 = ((u64)compbit_base_post_divide * + g->ltc_count) << fb_mmu_cbc_base_address_alignment_shift_v(); + + if (compbit_base_post_multiply64 < compbit_store_iova) + compbit_base_post_divide++; + + if (g->ops.ltc.cbc_fix_config) + compbit_base_post_divide = + g->ops.ltc.cbc_fix_config(g, compbit_base_post_divide); + + gk20a_writel(g, fb_mmu_cbc_base_r(), + fb_mmu_cbc_base_address_f(compbit_base_post_divide)); + + nvgpu_log(g, gpu_dbg_info | gpu_dbg_map_v | gpu_dbg_pte, + "compbit base.pa: 0x%x,%08x cbc_base:0x%08x\n", + (u32)(compbit_store_iova >> 32), + (u32)(compbit_store_iova & 0xffffffff), + compbit_base_post_divide); + nvgpu_log(g, gpu_dbg_fn, "cbc base %x", + gk20a_readl(g, fb_mmu_cbc_base_r())); + + gr->compbit_store.base_hw = compbit_base_post_divide; + + g->ops.ltc.cbc_ctrl(g, gk20a_cbc_op_invalidate, + 0, max_comptag_lines - 1); + +} + +void gv11b_fb_reset(struct gk20a *g) +{ + if (nvgpu_is_bpmp_running(g) && (!nvgpu_platform_is_simulation(g))) { + nvgpu_log(g, gpu_dbg_info, "mc_elpg_enable set by bpmp"); + } else { + u32 mc_elpg_enable_val; + + nvgpu_log(g, gpu_dbg_info, "enable xbar, pfb and hub"); + mc_elpg_enable_val = mc_elpg_enable_xbar_enabled_f() | + mc_elpg_enable_pfb_enabled_f() | + mc_elpg_enable_hub_enabled_f(); + mc_elpg_enable_val |= gk20a_readl(g, mc_elpg_enable_r()); + 
gk20a_writel(g, mc_elpg_enable_r(), mc_elpg_enable_val); + + } + /* fs hub should be out of reset by now */ + gv11b_init_nvlink_soc_credits(g); +} + +static const char * const invalid_str = "invalid"; + +static const char *const fault_type_descs_gv11b[] = { + "invalid pde", + "invalid pde size", + "invalid pte", + "limit violation", + "unbound inst block", + "priv violation", + "write", + "read", + "pitch mask violation", + "work creation", + "unsupported aperture", + "compression failure", + "unsupported kind", + "region violation", + "poison", + "atomic" +}; + +static const char *const fault_client_type_descs_gv11b[] = { + "gpc", + "hub", +}; + +static const char *const fault_access_type_descs_gv11b[] = { + "virt read", + "virt write", + "virt atomic strong", + "virt prefetch", + "virt atomic weak", + "xxx", + "xxx", + "xxx", + "phys read", + "phys write", + "phys atomic", + "phys prefetch", +}; + +static const char *const hub_client_descs_gv11b[] = { + "vip", "ce0", "ce1", "dniso", "fe", "fecs", "host", "host cpu", + "host cpu nb", "iso", "mmu", "nvdec", "nvenc1", "nvenc2", + "niso", "p2p", "pd", "perf", "pmu", "raster twod", "scc", + "scc nb", "sec", "ssync", "gr copy", "xv", "mmu nb", + "nvenc", "d falcon", "sked", "a falcon", "hsce0", "hsce1", + "hsce2", "hsce3", "hsce4", "hsce5", "hsce6", "hsce7", "hsce8", + "hsce9", "hshub", "ptp x0", "ptp x1", "ptp x2", "ptp x3", + "ptp x4", "ptp x5", "ptp x6", "ptp x7", "vpr scrubber0", + "vpr scrubber1", "dwbif", "fbfalcon", "ce shim", "gsp", + "dont care" +}; + +static const char *const gpc_client_descs_gv11b[] = { + "t1 0", "t1 1", "t1 2", "t1 3", + "t1 4", "t1 5", "t1 6", "t1 7", + "pe 0", "pe 1", "pe 2", "pe 3", + "pe 4", "pe 5", "pe 6", "pe 7", + "rast", "gcc", "gpccs", + "prop 0", "prop 1", "prop 2", "prop 3", + "gpm", + "ltp utlb 0", "ltp utlb 1", "ltp utlb 2", "ltp utlb 3", + "ltp utlb 4", "ltp utlb 5", "ltp utlb 6", "ltp utlb 7", + "utlb", + "t1 8", "t1 9", "t1 10", "t1 11", + "t1 12", "t1 13", "t1 14", "t1 15", 
+ "tpccs 0", "tpccs 1", "tpccs 2", "tpccs 3", + "tpccs 4", "tpccs 5", "tpccs 6", "tpccs 7", + "pe 8", "pe 9", "tpccs 8", "tpccs 9", + "t1 16", "t1 17", "t1 18", "t1 19", + "pe 10", "pe 11", "tpccs 10", "tpccs 11", + "t1 20", "t1 21", "t1 22", "t1 23", + "pe 12", "pe 13", "tpccs 12", "tpccs 13", + "t1 24", "t1 25", "t1 26", "t1 27", + "pe 14", "pe 15", "tpccs 14", "tpccs 15", + "t1 28", "t1 29", "t1 30", "t1 31", + "pe 16", "pe 17", "tpccs 16", "tpccs 17", + "t1 32", "t1 33", "t1 34", "t1 35", + "pe 18", "pe 19", "tpccs 18", "tpccs 19", + "t1 36", "t1 37", "t1 38", "t1 39", +}; + +u32 gv11b_fb_is_fault_buf_enabled(struct gk20a *g, + unsigned int index) +{ + u32 reg_val; + + reg_val = gk20a_readl(g, fb_mmu_fault_buffer_size_r(index)); + return fb_mmu_fault_buffer_size_enable_v(reg_val); +} + +static void gv11b_fb_fault_buffer_get_ptr_update(struct gk20a *g, + unsigned int index, u32 next) +{ + u32 reg_val; + + nvgpu_log(g, gpu_dbg_intr, "updating get index with = %d", next); + + reg_val = gk20a_readl(g, fb_mmu_fault_buffer_get_r(index)); + reg_val = set_field(reg_val, fb_mmu_fault_buffer_get_ptr_m(), + fb_mmu_fault_buffer_get_ptr_f(next)); + + /* while the fault is being handled it is possible for overflow + * to happen, + */ + if (reg_val & fb_mmu_fault_buffer_get_overflow_m()) + reg_val |= fb_mmu_fault_buffer_get_overflow_clear_f(); + + gk20a_writel(g, fb_mmu_fault_buffer_get_r(index), reg_val); + + /* make sure get ptr update is visible to everyone to avoid + * reading already read entry + */ + nvgpu_mb(); +} + +static u32 gv11b_fb_fault_buffer_get_index(struct gk20a *g, + unsigned int index) +{ + u32 reg_val; + + reg_val = gk20a_readl(g, fb_mmu_fault_buffer_get_r(index)); + return fb_mmu_fault_buffer_get_ptr_v(reg_val); +} + +static u32 gv11b_fb_fault_buffer_put_index(struct gk20a *g, + unsigned int index) +{ + u32 reg_val; + + reg_val = gk20a_readl(g, fb_mmu_fault_buffer_put_r(index)); + return fb_mmu_fault_buffer_put_ptr_v(reg_val); +} + +static u32 
gv11b_fb_fault_buffer_size_val(struct gk20a *g,
		unsigned int index)
{
	u32 reg_val;

	reg_val = gk20a_readl(g, fb_mmu_fault_buffer_size_r(index));
	return fb_mmu_fault_buffer_size_val_v(reg_val);
}

/* Buffer is empty when GET == PUT; GET is returned through *get_idx. */
static bool gv11b_fb_is_fault_buffer_empty(struct gk20a *g,
		unsigned int index, u32 *get_idx)
{
	u32 put_idx;

	*get_idx = gv11b_fb_fault_buffer_get_index(g, index);
	put_idx = gv11b_fb_fault_buffer_put_index(g, index);

	return *get_idx == put_idx;
}

/* Circular buffer is full when GET is one ahead of PUT (mod size). */
static bool gv11b_fb_is_fault_buffer_full(struct gk20a *g,
		unsigned int index)
{
	u32 get_idx, put_idx, entries;


	get_idx = gv11b_fb_fault_buffer_get_index(g, index);

	put_idx = gv11b_fb_fault_buffer_put_index(g, index);

	entries = gv11b_fb_fault_buffer_size_val(g, index);

	return get_idx == ((put_idx + 1) % entries);
}

/*
 * Enable or disable HW fault buffer @index.  On disable, poll the fault
 * status busy bit (with backoff, bounded by the GR idle timeout) so HW is
 * guaranteed to have stopped writing the buffer before it is unmapped.
 */
void gv11b_fb_fault_buf_set_state_hw(struct gk20a *g,
		unsigned int index, unsigned int state)
{
	u32 fault_status;
	u32 reg_val;

	nvgpu_log_fn(g, " ");

	reg_val = gk20a_readl(g, fb_mmu_fault_buffer_size_r(index));
	if (state) {
		if (gv11b_fb_is_fault_buf_enabled(g, index)) {
			nvgpu_log_info(g, "fault buffer is already enabled");
		} else {
			reg_val |= fb_mmu_fault_buffer_size_enable_true_f();
			gk20a_writel(g, fb_mmu_fault_buffer_size_r(index),
					reg_val);
		}

	} else {
		struct nvgpu_timeout timeout;
		u32 delay = GR_IDLE_CHECK_DEFAULT;

		nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g),
				NVGPU_TIMER_CPU_TIMER);

		reg_val &= (~(fb_mmu_fault_buffer_size_enable_m()));
		gk20a_writel(g, fb_mmu_fault_buffer_size_r(index), reg_val);

		fault_status = gk20a_readl(g, fb_mmu_fault_status_r());

		do {
			if (!(fault_status & fb_mmu_fault_status_busy_true_f()))
				break;
			/*
			 * Make sure fault buffer is disabled.
			 * This is to avoid accessing fault buffer by hw
			 * during the window BAR2 is being unmapped by s/w
			 */
			nvgpu_log_info(g, "fault status busy set, check again");
			fault_status = gk20a_readl(g, fb_mmu_fault_status_r());

			nvgpu_usleep_range(delay, delay * 2);
			delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
		} while (!nvgpu_timeout_expired_msg(&timeout,
				"fault status busy set"));
	}
}

/*
 * Program the GPU VA and size of HW fault buffer @index, then enable it.
 * The buffer is disabled first so HW cannot write while it is reprogrammed.
 * Size is sized to one entry per channel (get_num_fifos).
 */
void gv11b_fb_fault_buf_configure_hw(struct gk20a *g, unsigned int index)
{
	u32 addr_lo;
	u32 addr_hi;

	nvgpu_log_fn(g, " ");

	gv11b_fb_fault_buf_set_state_hw(g, index,
			FAULT_BUF_DISABLED);
	addr_lo = u64_lo32(g->mm.hw_fault_buf[index].gpu_va >>
			ram_in_base_shift_v());
	addr_hi = u64_hi32(g->mm.hw_fault_buf[index].gpu_va);

	gk20a_writel(g, fb_mmu_fault_buffer_lo_r(index),
			fb_mmu_fault_buffer_lo_addr_f(addr_lo));

	gk20a_writel(g, fb_mmu_fault_buffer_hi_r(index),
			fb_mmu_fault_buffer_hi_addr_f(addr_hi));

	gk20a_writel(g, fb_mmu_fault_buffer_size_r(index),
		fb_mmu_fault_buffer_size_val_f(g->ops.fifo.get_num_fifos(g)) |
		fb_mmu_fault_buffer_size_overflow_intr_enable_f());

	gv11b_fb_fault_buf_set_state_hw(g, index, FAULT_BUF_ENABLED);
}

/* Set bits in the NISO interrupt enable-set register @index. */
static void gv11b_fb_intr_en_set(struct gk20a *g,
		unsigned int index, u32 mask)
{
	u32 reg_val;

	reg_val = gk20a_readl(g, fb_niso_intr_en_set_r(index));
	reg_val |= mask;
	gk20a_writel(g, fb_niso_intr_en_set_r(index), reg_val);
}

/* Set bits in the NISO interrupt enable-clear register @index. */
static void gv11b_fb_intr_en_clr(struct gk20a *g,
		unsigned int index, u32 mask)
{
	u32 reg_val;

	reg_val = gk20a_readl(g, fb_niso_intr_en_clr_r(index));
	reg_val |= mask;
	gk20a_writel(g, fb_niso_intr_en_clr_r(index), reg_val);
}

/* Translate HUB_INTR_TYPE_* flags into enable-clear register bits. */
static u32 gv11b_fb_get_hub_intr_clr_mask(struct gk20a *g,
		unsigned int intr_type)
{
	u32 mask = 0;

	if (intr_type & HUB_INTR_TYPE_OTHER) {
		mask |=
			fb_niso_intr_en_clr_mmu_other_fault_notify_m();
	}

	if (intr_type & HUB_INTR_TYPE_NONREPLAY) {
		mask |=
			fb_niso_intr_en_clr_mmu_nonreplayable_fault_notify_m() |
			fb_niso_intr_en_clr_mmu_nonreplayable_fault_overflow_m();
	}

	if (intr_type & HUB_INTR_TYPE_REPLAY) {
		mask |=
			fb_niso_intr_en_clr_mmu_replayable_fault_notify_m() |
			fb_niso_intr_en_clr_mmu_replayable_fault_overflow_m();
	}

	if (intr_type & HUB_INTR_TYPE_ECC_UNCORRECTED) {
		mask |=
			fb_niso_intr_en_clr_mmu_ecc_uncorrected_error_notify_m();
	}

	if (intr_type & HUB_INTR_TYPE_ACCESS_COUNTER) {
		mask |=
			fb_niso_intr_en_clr_hub_access_counter_notify_m() |
			fb_niso_intr_en_clr_hub_access_counter_error_m();
	}

	return mask;
}

/* Translate HUB_INTR_TYPE_* flags into enable-set register bits. */
static u32 gv11b_fb_get_hub_intr_en_mask(struct gk20a *g,
		unsigned int intr_type)
{
	u32 mask = 0;

	if (intr_type & HUB_INTR_TYPE_OTHER) {
		mask |=
			fb_niso_intr_en_set_mmu_other_fault_notify_m();
	}

	if (intr_type & HUB_INTR_TYPE_NONREPLAY) {
		mask |=
			fb_niso_intr_en_set_mmu_nonreplayable_fault_notify_m() |
			fb_niso_intr_en_set_mmu_nonreplayable_fault_overflow_m();
	}

	if (intr_type & HUB_INTR_TYPE_REPLAY) {
		mask |=
			fb_niso_intr_en_set_mmu_replayable_fault_notify_m() |
			fb_niso_intr_en_set_mmu_replayable_fault_overflow_m();
	}

	if (intr_type & HUB_INTR_TYPE_ECC_UNCORRECTED) {
		mask |=
			fb_niso_intr_en_set_mmu_ecc_uncorrected_error_notify_m();
	}

	if (intr_type & HUB_INTR_TYPE_ACCESS_COUNTER) {
		mask |=
			fb_niso_intr_en_set_hub_access_counter_notify_m() |
			fb_niso_intr_en_set_hub_access_counter_error_m();
	}

	return mask;
}

void gv11b_fb_enable_hub_intr(struct gk20a *g,
		unsigned int index, unsigned int intr_type)
{
	u32 mask = 0;

	mask = gv11b_fb_get_hub_intr_en_mask(g, intr_type);

	if (mask)
		gv11b_fb_intr_en_set(g, index, mask);
}

void gv11b_fb_disable_hub_intr(struct gk20a *g,
		unsigned int index, unsigned int intr_type)
{
	u32 mask = 0;

	mask = gv11b_fb_get_hub_intr_clr_mask(g, intr_type);

	if (mask)
		gv11b_fb_intr_en_clr(g, index, mask);
}

/*
 * ISR for HUB MMU L2 TLB ECC errors: read deltas, clear the interrupt,
 * accumulate into g->ecc (with counter-overflow compensation) and log.
 * NOTE(review): nearly identical to the hubtlb/fillunit handlers below --
 * candidates for a common helper in a follow-up.
 */
static void gv11b_handle_l2tlb_ecc_isr(struct gk20a *g, u32 ecc_status)
{
	u32 ecc_addr,
corrected_cnt, uncorrected_cnt;
	u32 corrected_delta, uncorrected_delta;
	u32 corrected_overflow, uncorrected_overflow;

	ecc_addr = gk20a_readl(g, fb_mmu_l2tlb_ecc_address_r());
	corrected_cnt = gk20a_readl(g,
		fb_mmu_l2tlb_ecc_corrected_err_count_r());
	uncorrected_cnt = gk20a_readl(g,
		fb_mmu_l2tlb_ecc_uncorrected_err_count_r());

	corrected_delta = fb_mmu_l2tlb_ecc_corrected_err_count_total_v(
		corrected_cnt);
	uncorrected_delta = fb_mmu_l2tlb_ecc_uncorrected_err_count_total_v(
		uncorrected_cnt);
	corrected_overflow = ecc_status &
		fb_mmu_l2tlb_ecc_status_corrected_err_total_counter_overflow_m();

	uncorrected_overflow = ecc_status &
		fb_mmu_l2tlb_ecc_status_uncorrected_err_total_counter_overflow_m();

	/* clear the interrupt */
	if ((corrected_delta > 0) || corrected_overflow)
		gk20a_writel(g, fb_mmu_l2tlb_ecc_corrected_err_count_r(), 0);
	if ((uncorrected_delta > 0) || uncorrected_overflow)
		gk20a_writel(g, fb_mmu_l2tlb_ecc_uncorrected_err_count_r(), 0);

	gk20a_writel(g, fb_mmu_l2tlb_ecc_status_r(),
		fb_mmu_l2tlb_ecc_status_reset_clear_f());

	/* Handle overflow */
	if (corrected_overflow)
		corrected_delta += (0x1UL << fb_mmu_l2tlb_ecc_corrected_err_count_total_s());
	if (uncorrected_overflow)
		uncorrected_delta += (0x1UL << fb_mmu_l2tlb_ecc_uncorrected_err_count_total_s());


	g->ecc.eng.t19x.mmu_l2tlb_corrected_err_count.counters[0] +=
		corrected_delta;
	g->ecc.eng.t19x.mmu_l2tlb_uncorrected_err_count.counters[0] +=
		uncorrected_delta;

	if (ecc_status & fb_mmu_l2tlb_ecc_status_corrected_err_l2tlb_sa_data_m())
		nvgpu_log(g, gpu_dbg_intr, "corrected ecc sa data error");
	if (ecc_status & fb_mmu_l2tlb_ecc_status_uncorrected_err_l2tlb_sa_data_m())
		nvgpu_log(g, gpu_dbg_intr, "uncorrected ecc sa data error");
	if (corrected_overflow || uncorrected_overflow)
		nvgpu_info(g, "mmu l2tlb ecc counter overflow!");

	nvgpu_log(g, gpu_dbg_intr,
		"ecc error address: 0x%x", ecc_addr);
	nvgpu_log(g, gpu_dbg_intr,
		"ecc error count corrected: %d, uncorrected %d",
		g->ecc.eng.t19x.mmu_l2tlb_corrected_err_count.counters[0],
		g->ecc.eng.t19x.mmu_l2tlb_uncorrected_err_count.counters[0]);
}

/* ISR for HUB TLB ECC errors; same structure as the l2tlb handler above. */
static void gv11b_handle_hubtlb_ecc_isr(struct gk20a *g, u32 ecc_status)
{
	u32 ecc_addr, corrected_cnt, uncorrected_cnt;
	u32 corrected_delta, uncorrected_delta;
	u32 corrected_overflow, uncorrected_overflow;

	ecc_addr = gk20a_readl(g, fb_mmu_hubtlb_ecc_address_r());
	corrected_cnt = gk20a_readl(g,
		fb_mmu_hubtlb_ecc_corrected_err_count_r());
	uncorrected_cnt = gk20a_readl(g,
		fb_mmu_hubtlb_ecc_uncorrected_err_count_r());

	corrected_delta = fb_mmu_hubtlb_ecc_corrected_err_count_total_v(
		corrected_cnt);
	uncorrected_delta = fb_mmu_hubtlb_ecc_uncorrected_err_count_total_v(
		uncorrected_cnt);
	corrected_overflow = ecc_status &
		fb_mmu_hubtlb_ecc_status_corrected_err_total_counter_overflow_m();

	uncorrected_overflow = ecc_status &
		fb_mmu_hubtlb_ecc_status_uncorrected_err_total_counter_overflow_m();

	/* clear the interrupt */
	if ((corrected_delta > 0) || corrected_overflow)
		gk20a_writel(g, fb_mmu_hubtlb_ecc_corrected_err_count_r(), 0);
	if ((uncorrected_delta > 0) || uncorrected_overflow)
		gk20a_writel(g, fb_mmu_hubtlb_ecc_uncorrected_err_count_r(), 0);

	gk20a_writel(g, fb_mmu_hubtlb_ecc_status_r(),
		fb_mmu_hubtlb_ecc_status_reset_clear_f());

	/* Handle overflow */
	if (corrected_overflow)
		corrected_delta += (0x1UL << fb_mmu_hubtlb_ecc_corrected_err_count_total_s());
	if (uncorrected_overflow)
		uncorrected_delta += (0x1UL << fb_mmu_hubtlb_ecc_uncorrected_err_count_total_s());


	g->ecc.eng.t19x.mmu_hubtlb_corrected_err_count.counters[0] +=
		corrected_delta;
	g->ecc.eng.t19x.mmu_hubtlb_uncorrected_err_count.counters[0] +=
		uncorrected_delta;

	if (ecc_status & fb_mmu_hubtlb_ecc_status_corrected_err_sa_data_m())
		nvgpu_log(g, gpu_dbg_intr, "corrected ecc sa data error");
	if (ecc_status & fb_mmu_hubtlb_ecc_status_uncorrected_err_sa_data_m())
		nvgpu_log(g, gpu_dbg_intr, "uncorrected ecc sa data error");
	if (corrected_overflow || uncorrected_overflow)
		nvgpu_info(g, "mmu hubtlb ecc counter overflow!");

	nvgpu_log(g, gpu_dbg_intr,
		"ecc error address: 0x%x", ecc_addr);
	nvgpu_log(g, gpu_dbg_intr,
		"ecc error count corrected: %d, uncorrected %d",
		g->ecc.eng.t19x.mmu_hubtlb_corrected_err_count.counters[0],
		g->ecc.eng.t19x.mmu_hubtlb_uncorrected_err_count.counters[0]);
}

/* ISR for PTE/PDE fill-unit ECC errors; same structure as the handlers
 * above, with separate PTE and PDE0 data-error status bits. */
static void gv11b_handle_fillunit_ecc_isr(struct gk20a *g, u32 ecc_status)
{
	u32 ecc_addr, corrected_cnt, uncorrected_cnt;
	u32 corrected_delta, uncorrected_delta;
	u32 corrected_overflow, uncorrected_overflow;

	ecc_addr = gk20a_readl(g, fb_mmu_fillunit_ecc_address_r());
	corrected_cnt = gk20a_readl(g,
		fb_mmu_fillunit_ecc_corrected_err_count_r());
	uncorrected_cnt = gk20a_readl(g,
		fb_mmu_fillunit_ecc_uncorrected_err_count_r());

	corrected_delta = fb_mmu_fillunit_ecc_corrected_err_count_total_v(
		corrected_cnt);
	uncorrected_delta = fb_mmu_fillunit_ecc_uncorrected_err_count_total_v(
		uncorrected_cnt);
	corrected_overflow = ecc_status &
		fb_mmu_fillunit_ecc_status_corrected_err_total_counter_overflow_m();

	uncorrected_overflow = ecc_status &
		fb_mmu_fillunit_ecc_status_uncorrected_err_total_counter_overflow_m();

	/* clear the interrupt */
	if ((corrected_delta > 0) || corrected_overflow)
		gk20a_writel(g, fb_mmu_fillunit_ecc_corrected_err_count_r(), 0);
	if ((uncorrected_delta > 0) || uncorrected_overflow)
		gk20a_writel(g, fb_mmu_fillunit_ecc_uncorrected_err_count_r(), 0);

	gk20a_writel(g, fb_mmu_fillunit_ecc_status_r(),
		fb_mmu_fillunit_ecc_status_reset_clear_f());

	/* Handle overflow */
	if (corrected_overflow)
		corrected_delta += (0x1UL << fb_mmu_fillunit_ecc_corrected_err_count_total_s());
	if (uncorrected_overflow)
		uncorrected_delta += (0x1UL << fb_mmu_fillunit_ecc_uncorrected_err_count_total_s());


	g->ecc.eng.t19x.mmu_fillunit_corrected_err_count.counters[0] +=
		corrected_delta;
	g->ecc.eng.t19x.mmu_fillunit_uncorrected_err_count.counters[0] +=
		uncorrected_delta;

	if (ecc_status & fb_mmu_fillunit_ecc_status_corrected_err_pte_data_m())
		nvgpu_log(g, gpu_dbg_intr, "corrected ecc pte data error");
	if (ecc_status & fb_mmu_fillunit_ecc_status_uncorrected_err_pte_data_m())
		nvgpu_log(g, gpu_dbg_intr, "uncorrected ecc pte data error");
	if (ecc_status & fb_mmu_fillunit_ecc_status_corrected_err_pde0_data_m())
		nvgpu_log(g, gpu_dbg_intr, "corrected ecc pde0 data error");
	if (ecc_status & fb_mmu_fillunit_ecc_status_uncorrected_err_pde0_data_m())
		nvgpu_log(g, gpu_dbg_intr, "uncorrected ecc pde0 data error");

	if (corrected_overflow || uncorrected_overflow)
		nvgpu_info(g, "mmu fillunit ecc counter overflow!");

	nvgpu_log(g, gpu_dbg_intr,
		"ecc error address: 0x%x", ecc_addr);
	nvgpu_log(g, gpu_dbg_intr,
		"ecc error count corrected: %d, uncorrected %d",
		g->ecc.eng.t19x.mmu_fillunit_corrected_err_count.counters[0],
		g->ecc.eng.t19x.mmu_fillunit_uncorrected_err_count.counters[0]);
}

/* Fill the human-readable *_desc strings of @mmfault from the lookup
 * tables, falling back to "invalid" (with a WARN) on out-of-range ids. */
static void gv11b_fb_parse_mmfault(struct mmu_fault_info *mmfault)
{
	if (WARN_ON(mmfault->fault_type >=
			ARRAY_SIZE(fault_type_descs_gv11b)))
		mmfault->fault_type_desc = invalid_str;
	else
		mmfault->fault_type_desc =
			fault_type_descs_gv11b[mmfault->fault_type];

	if (WARN_ON(mmfault->client_type >=
			ARRAY_SIZE(fault_client_type_descs_gv11b)))
		mmfault->client_type_desc = invalid_str;
	else
		mmfault->client_type_desc =
			fault_client_type_descs_gv11b[mmfault->client_type];

	mmfault->client_id_desc = invalid_str;
	if (mmfault->client_type ==
			gmmu_fault_client_type_hub_v()) {

		if (!(WARN_ON(mmfault->client_id >=
				ARRAY_SIZE(hub_client_descs_gv11b))))
			mmfault->client_id_desc =
				hub_client_descs_gv11b[mmfault->client_id];
	} else if (mmfault->client_type ==
			gmmu_fault_client_type_gpc_v()) {
		if (!(WARN_ON(mmfault->client_id >=
				ARRAY_SIZE(gpc_client_descs_gv11b))))
			mmfault->client_id_desc =
				gpc_client_descs_gv11b[mmfault->client_id];
	}

}

/* Dump a decoded fault record: one nvgpu_err summary plus verbose
 * gpu_dbg_intr detail lines.  No-op if the record is invalid. */
static void gv11b_fb_print_fault_info(struct gk20a *g,
		struct mmu_fault_info *mmfault)
{
	if (mmfault && mmfault->valid) {
		nvgpu_err(g, "[MMU FAULT] "
			"mmu engine id:  %d, "
			"ch id:  %d, "
			"fault addr: 0x%llx, "
			"fault addr aperture: %d, "
			"fault type: %s, "
			"access type: %s, ",
			mmfault->mmu_engine_id,
			mmfault->chid,
			mmfault->fault_addr,
			mmfault->fault_addr_aperture,
			mmfault->fault_type_desc,
			fault_access_type_descs_gv11b[mmfault->access_type]);
		nvgpu_log(g, gpu_dbg_intr, "[MMU FAULT] "
			"mmu engine id:  %d, "
			"faulted act eng id if any: 0x%x, "
			"faulted veid if any: 0x%x, "
			"faulted pbdma id if any: 0x%x, "
			"fault addr: 0x%llx, ",
			mmfault->mmu_engine_id,
			mmfault->faulted_engine,
			mmfault->faulted_subid,
			mmfault->faulted_pbdma,
			mmfault->fault_addr);
		nvgpu_log(g, gpu_dbg_intr, "[MMU FAULT] "
			"fault addr aperture: %d, "
			"fault type: %s, "
			"access type: %s, "
			"inst ptr: 0x%llx, "
			"inst ptr aperture: %d, ",
			mmfault->fault_addr_aperture,
			mmfault->fault_type_desc,
			fault_access_type_descs_gv11b[mmfault->access_type],
			mmfault->inst_ptr,
			mmfault->inst_aperture);
		nvgpu_log(g, gpu_dbg_intr, "[MMU FAULT] "
			"ch id:  %d, "
			"timestamp hi:lo 0x%08x:0x%08x, "
			"client type: %s, "
			"client id:  %s, "
			"gpc id if client type is gpc: %d, ",
			mmfault->chid,
			mmfault->timestamp_hi, mmfault->timestamp_lo,
			mmfault->client_type_desc,
			mmfault->client_id_desc,
			mmfault->gpc_id);
		nvgpu_log(g, gpu_dbg_intr, "[MMU FAULT] "
			"protected mode: %d, "
			"replayable fault: %d, "
			"replayable fault en:  %d ",
			mmfault->protected_mode,
			mmfault->replayable_fault,
			mmfault->replay_fault_en);
	}
}

/*
 *Fault buffer format
 *
 * 31    28     24 23           16 15            8 7     4       0
 *.-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-.
 *|            inst_lo                        |0 0|apr|0 0 0 0 0 0|
 *`-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-'
 *|                           inst_hi                             |
 *`-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-'
 *|                       addr_31_12                      |  |AP |
 *`-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-'
 *|                           addr_63_32                          |
 *`-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-'
 *|                          timestamp_lo                         |
 *`-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-'
 *|                          timestamp_hi                         |
 *`-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-'
 *|                          (reserved)        |    engine_id     |
 *`-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-'
 *|V|R|P|  gpc_id |0 0 0|t|0|acctp|0|   client |RF0 0|faulttype   |
 */

/*
 * Decode one HW fault-buffer entry at word @offset of @mem into @mmfault,
 * look up the owning channel by instance pointer (ref is taken; caller's
 * downstream handling must gk20a_channel_put() it), clear the entry's
 * valid bit in the buffer, and fill the descriptive strings.
 */
static void gv11b_fb_copy_from_hw_fault_buf(struct gk20a *g,
	 struct nvgpu_mem *mem, u32 offset, struct mmu_fault_info *mmfault)
{
	u32 rd32_val;
	u32 addr_lo, addr_hi;
	u64 inst_ptr;
	u32 chid = FIFO_INVAL_CHANNEL_ID;
	struct channel_gk20a *refch;

	memset(mmfault, 0, sizeof(*mmfault));

	rd32_val = nvgpu_mem_rd32(g, mem, offset +
			gmmu_fault_buf_entry_inst_lo_w());
	addr_lo = gmmu_fault_buf_entry_inst_lo_v(rd32_val);
	addr_lo = addr_lo << ram_in_base_shift_v();

	addr_hi = nvgpu_mem_rd32(g, mem, offset +
			gmmu_fault_buf_entry_inst_hi_w());
	addr_hi = gmmu_fault_buf_entry_inst_hi_v(addr_hi);

	inst_ptr = hi32_lo32_to_u64(addr_hi, addr_lo);

	/* refch will be put back after fault is handled */
	refch = gk20a_refch_from_inst_ptr(g, inst_ptr);
	if (refch)
		chid = refch->chid;

	/* it is ok to continue even if refch is NULL */
	mmfault->refch = refch;
	mmfault->chid = chid;
	mmfault->inst_ptr = inst_ptr;
	mmfault->inst_aperture = gmmu_fault_buf_entry_inst_aperture_v(rd32_val);

	rd32_val = nvgpu_mem_rd32(g, mem, offset +
			gmmu_fault_buf_entry_addr_lo_w());

	mmfault->fault_addr_aperture =
		gmmu_fault_buf_entry_addr_phys_aperture_v(rd32_val);
	/* fault address is 4K-aligned in the entry, hence the shift */
	addr_lo = gmmu_fault_buf_entry_addr_lo_v(rd32_val);
	addr_lo = addr_lo << ram_in_base_shift_v();

	rd32_val = nvgpu_mem_rd32(g, mem, offset +
			gmmu_fault_buf_entry_addr_hi_w());
	addr_hi = gmmu_fault_buf_entry_addr_hi_v(rd32_val);
	mmfault->fault_addr = hi32_lo32_to_u64(addr_hi, addr_lo);

	rd32_val = nvgpu_mem_rd32(g, mem, offset +
			gmmu_fault_buf_entry_timestamp_lo_w());
	mmfault->timestamp_lo =
		gmmu_fault_buf_entry_timestamp_lo_v(rd32_val);

	rd32_val = nvgpu_mem_rd32(g, mem, offset +
			gmmu_fault_buf_entry_timestamp_hi_w());
	mmfault->timestamp_hi =
		gmmu_fault_buf_entry_timestamp_hi_v(rd32_val);

	rd32_val = nvgpu_mem_rd32(g, mem, offset +
			gmmu_fault_buf_entry_engine_id_w());

	mmfault->mmu_engine_id =
		gmmu_fault_buf_entry_engine_id_v(rd32_val);
	gv11b_mmu_fault_id_to_eng_pbdma_id_and_veid(g, mmfault->mmu_engine_id,
		&mmfault->faulted_engine, &mmfault->faulted_subid,
		&mmfault->faulted_pbdma);

	rd32_val = nvgpu_mem_rd32(g, mem, offset +
			gmmu_fault_buf_entry_fault_type_w());
	mmfault->client_id =
		gmmu_fault_buf_entry_client_v(rd32_val);
	mmfault->replayable_fault =
		gmmu_fault_buf_entry_replayable_fault_v(rd32_val);

	mmfault->fault_type =
		gmmu_fault_buf_entry_fault_type_v(rd32_val);
	mmfault->access_type =
		gmmu_fault_buf_entry_access_type_v(rd32_val);

	mmfault->client_type =
		gmmu_fault_buf_entry_mmu_client_type_v(rd32_val);

	mmfault->gpc_id =
		gmmu_fault_buf_entry_gpc_id_v(rd32_val);
	mmfault->protected_mode =
		gmmu_fault_buf_entry_protected_mode_v(rd32_val);

	mmfault->replay_fault_en =
		gmmu_fault_buf_entry_replayable_fault_en_v(rd32_val);

	mmfault->valid = gmmu_fault_buf_entry_valid_v(rd32_val);

	/* hand the entry back to HW by clearing its valid bit */
	rd32_val = nvgpu_mem_rd32(g, mem, offset +
			gmmu_fault_buf_entry_fault_type_w());
	rd32_val &= ~(gmmu_fault_buf_entry_valid_m());
	nvgpu_mem_wr32(g, mem, offset + gmmu_fault_buf_entry_valid_w(),
			rd32_val);

	gv11b_fb_parse_mmfault(mmfault);
}

/*
 * Common handling for one decoded MMU fault.  CE faults get a fix-up
 * attempt first; non-replayable faults trigger channel/TSG (or full
 * runlist) teardown; replayable invalid-pte faults are fixed and replayed,
 * all other replayable faults are cancelled.  Accumulates the required
 * replay/cancel action into *invalidate_replay_val; drops the channel ref
 * taken by gv11b_fb_copy_from_hw_fault_buf() on paths that finish here.
 */
static void gv11b_fb_handle_mmu_fault_common(struct gk20a *g,
		 struct mmu_fault_info *mmfault, u32 *invalidate_replay_val)
{
	unsigned int id_type;
	u32 num_lce, act_eng_bitmask = 0;
	int err = 0;
	u32 id = ((u32)~0);

	if (!mmfault->valid)
		return;

	gv11b_fb_print_fault_info(g, mmfault);

	num_lce = gv11b_ce_get_num_lce(g);
	if ((mmfault->mmu_engine_id >=
			gmmu_fault_mmu_eng_id_ce0_v()) &&
			(mmfault->mmu_engine_id <
			gmmu_fault_mmu_eng_id_ce0_v() + num_lce)) {
		/* CE page faults are not reported as replayable */
		nvgpu_log(g, gpu_dbg_intr, "CE Faulted");
		err = gv11b_fb_fix_page_fault(g, mmfault);
		gv11b_fifo_reset_pbdma_and_eng_faulted(g, mmfault->refch,
			mmfault->faulted_pbdma, mmfault->faulted_engine);
		if (!err) {
			nvgpu_log(g, gpu_dbg_intr, "CE Page Fault Fixed");
			*invalidate_replay_val = 0;
			/* refch in mmfault is assigned at the time of copying
			 * fault info from snap reg or bar2 fault buf
			 */
			gk20a_channel_put(mmfault->refch);
			return;
		}
		/* Do recovery. Channel recovery needs refch */
		nvgpu_log(g, gpu_dbg_intr, "CE Page Fault Not Fixed");
	}

	if (!mmfault->replayable_fault) {
		if (mmfault->fault_type ==
				gmmu_fault_type_unbound_inst_block_v()) {
			/*
			 * Bug 1847172: When an engine faults due to an unbound
			 * instance block, the fault cannot be isolated to a
			 * single context so we need to reset the entire runlist
			 */
			id_type = ID_TYPE_UNKNOWN;
			nvgpu_log(g, gpu_dbg_intr, "UNBOUND INST BLOCK MMU FAULT");

		} else if (mmfault->refch) {
			if (gk20a_is_channel_marked_as_tsg(mmfault->refch)) {
				id = mmfault->refch->tsgid;
				id_type = ID_TYPE_TSG;
			} else {
				id = mmfault->chid;
				id_type = ID_TYPE_CHANNEL;
			}
		} else {
			id_type = ID_TYPE_UNKNOWN;
		}
		if (mmfault->faulted_engine != FIFO_INVAL_ENGINE_ID)
			act_eng_bitmask = BIT(mmfault->faulted_engine);

		g->ops.fifo.teardown_ch_tsg(g, act_eng_bitmask,
			id, id_type, RC_TYPE_MMU_FAULT, mmfault);
	} else {
		if (mmfault->fault_type == gmmu_fault_type_pte_v()) {
			nvgpu_log(g, gpu_dbg_intr, "invalid pte! try to fix");
			err = gv11b_fb_fix_page_fault(g, mmfault);
			if (err)
				*invalidate_replay_val |=
					fb_mmu_invalidate_replay_cancel_global_f();
			else
				*invalidate_replay_val |=
					fb_mmu_invalidate_replay_start_ack_all_f();
		} else {
			/* cancel faults other than invalid pte */
			*invalidate_replay_val |=
				fb_mmu_invalidate_replay_cancel_global_f();
		}
		/* refch in mmfault is assigned at the time of copying
		 * fault info from snap reg or bar2 fault buf
		 */
		gk20a_channel_put(mmfault->refch);
	}
}

/*
 * Issue the accumulated replay or cancel action to the MMU.
 * NOTE(review): err is assigned but never checked or returned here --
 * failures of gv11b_fb_mmu_invalidate_replay() are silently dropped.
 */
static void gv11b_fb_replay_or_cancel_faults(struct gk20a *g,
		u32 invalidate_replay_val)
{
	int err = 0;

	nvgpu_log_fn(g, " ");

	if (invalidate_replay_val &
			fb_mmu_invalidate_replay_cancel_global_f()) {
		/*
		 * cancel faults so that next time it faults as
		 * replayable faults and channel recovery can be done
		 */
		err = gv11b_fb_mmu_invalidate_replay(g,
			fb_mmu_invalidate_replay_cancel_global_f());
	} else if (invalidate_replay_val &
			fb_mmu_invalidate_replay_start_ack_all_f()) {
		/* pte valid is fixed. replay faulting request */
		err = gv11b_fb_mmu_invalidate_replay(g,
			fb_mmu_invalidate_replay_start_ack_all_f());
	}
}

/* Drain all valid entries from HW fault buffer @index (replay or
 * non-replay), handling each via gv11b_fb_handle_mmu_fault_common(). */
static void gv11b_fb_handle_mmu_nonreplay_replay_fault(struct gk20a *g,
		u32 fault_status, unsigned int index)
{
	u32 get_indx, offset, rd32_val, entries;
	struct nvgpu_mem *mem;
	struct mmu_fault_info *mmfault;
	u32 invalidate_replay_val = 0;
	u64 prev_fault_addr = 0ULL;
	u64 next_fault_addr = 0ULL;

	if (gv11b_fb_is_fault_buffer_empty(g, index, &get_indx)) {
		nvgpu_log(g, gpu_dbg_intr,
			"SPURIOUS mmu fault: reg index:%d", index);
		return;
	}
	nvgpu_info(g, "%s MMU FAULT" ,
			index == REPLAY_REG_INDEX ?
"REPLAY" : "NON-REPLAY"); + + nvgpu_log(g, gpu_dbg_intr, "get ptr = %d", get_indx); + + mem = &g->mm.hw_fault_buf[index]; + mmfault = g->mm.fault_info[index]; + + entries = gv11b_fb_fault_buffer_size_val(g, index); + nvgpu_log(g, gpu_dbg_intr, "buffer num entries = %d", entries); + + offset = (get_indx * gmmu_fault_buf_size_v()) / sizeof(u32); + nvgpu_log(g, gpu_dbg_intr, "starting word offset = 0x%x", offset); + + rd32_val = nvgpu_mem_rd32(g, mem, + offset + gmmu_fault_buf_entry_valid_w()); + nvgpu_log(g, gpu_dbg_intr, "entry valid offset val = 0x%x", rd32_val); + + while ((rd32_val & gmmu_fault_buf_entry_valid_m())) { + + nvgpu_log(g, gpu_dbg_intr, "entry valid = 0x%x", rd32_val); + + gv11b_fb_copy_from_hw_fault_buf(g, mem, offset, mmfault); + + get_indx = (get_indx + 1) % entries; + nvgpu_log(g, gpu_dbg_intr, "new get index = %d", get_indx); + + gv11b_fb_fault_buffer_get_ptr_update(g, index, get_indx); + + offset = (get_indx * gmmu_fault_buf_size_v()) / sizeof(u32); + nvgpu_log(g, gpu_dbg_intr, "next word offset = 0x%x", offset); + + rd32_val = nvgpu_mem_rd32(g, mem, + offset + gmmu_fault_buf_entry_valid_w()); + + if (index == REPLAY_REG_INDEX && mmfault->fault_addr != 0ULL) { + /* fault_addr "0" is not supposed to be fixed ever. + * For the first time when prev = 0, next = 0 and + * fault addr is also 0 then handle_mmu_fault_common will + * not be called. 
Fix by checking fault_addr not equal to 0 + */ + prev_fault_addr = next_fault_addr; + next_fault_addr = mmfault->fault_addr; + if (prev_fault_addr == next_fault_addr) { + nvgpu_log(g, gpu_dbg_intr, "pte is fixed"); + if (mmfault->refch) + gk20a_channel_put(mmfault->refch); + /* pte already fixed for this addr */ + continue; + } + } + + gv11b_fb_handle_mmu_fault_common(g, mmfault, + &invalidate_replay_val); + + } + if (index == REPLAY_REG_INDEX && invalidate_replay_val) + gv11b_fb_replay_or_cancel_faults(g, invalidate_replay_val); +} + +static void gv11b_mm_copy_from_fault_snap_reg(struct gk20a *g, + u32 fault_status, struct mmu_fault_info *mmfault) +{ + u32 reg_val; + u32 addr_lo, addr_hi; + u64 inst_ptr; + int chid = FIFO_INVAL_CHANNEL_ID; + struct channel_gk20a *refch; + + memset(mmfault, 0, sizeof(*mmfault)); + + if (!(fault_status & fb_mmu_fault_status_valid_set_f())) { + + nvgpu_log(g, gpu_dbg_intr, "mmu fault status valid not set"); + return; + } + + reg_val = gk20a_readl(g, fb_mmu_fault_inst_lo_r()); + addr_lo = fb_mmu_fault_inst_lo_addr_v(reg_val); + addr_lo = addr_lo << ram_in_base_shift_v(); + + addr_hi = gk20a_readl(g, fb_mmu_fault_inst_hi_r()); + addr_hi = fb_mmu_fault_inst_hi_addr_v(addr_hi); + inst_ptr = hi32_lo32_to_u64(addr_hi, addr_lo); + + /* refch will be put back after fault is handled */ + refch = gk20a_refch_from_inst_ptr(g, inst_ptr); + if (refch) + chid = refch->chid; + + /* It is still ok to continue if refch is NULL */ + mmfault->refch = refch; + mmfault->chid = chid; + mmfault->inst_ptr = inst_ptr; + mmfault->inst_aperture = fb_mmu_fault_inst_lo_aperture_v(reg_val); + mmfault->mmu_engine_id = fb_mmu_fault_inst_lo_engine_id_v(reg_val); + + gv11b_mmu_fault_id_to_eng_pbdma_id_and_veid(g, mmfault->mmu_engine_id, + &mmfault->faulted_engine, &mmfault->faulted_subid, + &mmfault->faulted_pbdma); + + reg_val = gk20a_readl(g, fb_mmu_fault_addr_lo_r()); + addr_lo = fb_mmu_fault_addr_lo_addr_v(reg_val); + addr_lo = addr_lo << ram_in_base_shift_v(); + 
+ mmfault->fault_addr_aperture = + fb_mmu_fault_addr_lo_phys_aperture_v(reg_val); + + addr_hi = gk20a_readl(g, fb_mmu_fault_addr_hi_r()); + addr_hi = fb_mmu_fault_addr_hi_addr_v(addr_hi); + mmfault->fault_addr = hi32_lo32_to_u64(addr_hi, addr_lo); + + reg_val = gk20a_readl(g, fb_mmu_fault_info_r()); + mmfault->fault_type = fb_mmu_fault_info_fault_type_v(reg_val); + mmfault->replayable_fault = + fb_mmu_fault_info_replayable_fault_v(reg_val); + mmfault->client_id = fb_mmu_fault_info_client_v(reg_val); + mmfault->access_type = fb_mmu_fault_info_access_type_v(reg_val); + mmfault->client_type = fb_mmu_fault_info_client_type_v(reg_val); + mmfault->gpc_id = fb_mmu_fault_info_gpc_id_v(reg_val); + mmfault->protected_mode = + fb_mmu_fault_info_protected_mode_v(reg_val); + mmfault->replay_fault_en = + fb_mmu_fault_info_replayable_fault_en_v(reg_val); + + mmfault->valid = fb_mmu_fault_info_valid_v(reg_val); + + fault_status &= ~(fb_mmu_fault_status_valid_m()); + gk20a_writel(g, fb_mmu_fault_status_r(), fault_status); + + gv11b_fb_parse_mmfault(mmfault); + +} + +static void gv11b_fb_handle_replay_fault_overflow(struct gk20a *g, + u32 fault_status) +{ + u32 reg_val; + unsigned int index = REPLAY_REG_INDEX; + + reg_val = gk20a_readl(g, fb_mmu_fault_buffer_get_r(index)); + + if (fault_status & + fb_mmu_fault_status_replayable_getptr_corrupted_m()) { + + nvgpu_err(g, "replayable getptr corrupted set"); + + gv11b_fb_fault_buf_configure_hw(g, index); + + reg_val = set_field(reg_val, + fb_mmu_fault_buffer_get_getptr_corrupted_m(), + fb_mmu_fault_buffer_get_getptr_corrupted_clear_f()); + } + + if (fault_status & + fb_mmu_fault_status_replayable_overflow_m()) { + bool buffer_full = gv11b_fb_is_fault_buffer_full(g, index); + + nvgpu_err(g, "replayable overflow: buffer full:%s", + buffer_full?"true":"false"); + + reg_val = set_field(reg_val, + fb_mmu_fault_buffer_get_overflow_m(), + fb_mmu_fault_buffer_get_overflow_clear_f()); + } + + gk20a_writel(g, fb_mmu_fault_buffer_get_r(index), 
reg_val); +} + +static void gv11b_fb_handle_nonreplay_fault_overflow(struct gk20a *g, + u32 fault_status) +{ + u32 reg_val; + unsigned int index = NONREPLAY_REG_INDEX; + + reg_val = gk20a_readl(g, fb_mmu_fault_buffer_get_r(index)); + + if (fault_status & + fb_mmu_fault_status_non_replayable_getptr_corrupted_m()) { + + nvgpu_err(g, "non replayable getptr corrupted set"); + + gv11b_fb_fault_buf_configure_hw(g, index); + + reg_val = set_field(reg_val, + fb_mmu_fault_buffer_get_getptr_corrupted_m(), + fb_mmu_fault_buffer_get_getptr_corrupted_clear_f()); + } + + if (fault_status & + fb_mmu_fault_status_non_replayable_overflow_m()) { + + bool buffer_full = gv11b_fb_is_fault_buffer_full(g, index); + + nvgpu_err(g, "non replayable overflow: buffer full:%s", + buffer_full?"true":"false"); + + reg_val = set_field(reg_val, + fb_mmu_fault_buffer_get_overflow_m(), + fb_mmu_fault_buffer_get_overflow_clear_f()); + } + + gk20a_writel(g, fb_mmu_fault_buffer_get_r(index), reg_val); +} + +static void gv11b_fb_handle_bar2_fault(struct gk20a *g, + struct mmu_fault_info *mmfault, u32 fault_status) +{ + gv11b_fb_disable_hub_intr(g, STALL_REG_INDEX, + HUB_INTR_TYPE_NONREPLAY | HUB_INTR_TYPE_REPLAY); + + + if (fault_status & fb_mmu_fault_status_non_replayable_error_m()) { + if (gv11b_fb_is_fault_buf_enabled(g, NONREPLAY_REG_INDEX)) + gv11b_fb_fault_buf_configure_hw(g, NONREPLAY_REG_INDEX); + } + + if (fault_status & fb_mmu_fault_status_replayable_error_m()) { + if (gv11b_fb_is_fault_buf_enabled(g, REPLAY_REG_INDEX)) + gv11b_fb_fault_buf_configure_hw(g, REPLAY_REG_INDEX); + } + gv11b_ce_mthd_buffer_fault_in_bar2_fault(g); + + g->ops.mm.init_bar2_mm_hw_setup(g); + + if (mmfault->refch) { + gk20a_channel_put(mmfault->refch); + mmfault->refch = NULL; + } + gv11b_fb_enable_hub_intr(g, STALL_REG_INDEX, + HUB_INTR_TYPE_NONREPLAY | HUB_INTR_TYPE_REPLAY); +} + +static void gv11b_fb_handle_other_fault_notify(struct gk20a *g, + u32 fault_status) +{ + struct mmu_fault_info *mmfault; + u32 
invalidate_replay_val = 0; + + mmfault = g->mm.fault_info[FAULT_TYPE_OTHER_AND_NONREPLAY]; + + gv11b_mm_copy_from_fault_snap_reg(g, fault_status, mmfault); + + /* BAR2/Physical faults will not be snapped in hw fault buf */ + if (mmfault->mmu_engine_id == gmmu_fault_mmu_eng_id_bar2_v()) { + nvgpu_err(g, "BAR2 MMU FAULT"); + gv11b_fb_handle_bar2_fault(g, mmfault, fault_status); + + } else if (mmfault->mmu_engine_id == + gmmu_fault_mmu_eng_id_physical_v()) { + /* usually means VPR or out of bounds physical accesses */ + nvgpu_err(g, "PHYSICAL MMU FAULT"); + + } else { + gv11b_fb_handle_mmu_fault_common(g, mmfault, + &invalidate_replay_val); + + if (invalidate_replay_val) + gv11b_fb_replay_or_cancel_faults(g, + invalidate_replay_val); + } +} + +static void gv11b_fb_handle_dropped_mmu_fault(struct gk20a *g, u32 fault_status) +{ + u32 dropped_faults = 0; + + dropped_faults = fb_mmu_fault_status_dropped_bar1_phys_set_f() | + fb_mmu_fault_status_dropped_bar1_virt_set_f() | + fb_mmu_fault_status_dropped_bar2_phys_set_f() | + fb_mmu_fault_status_dropped_bar2_virt_set_f() | + fb_mmu_fault_status_dropped_ifb_phys_set_f() | + fb_mmu_fault_status_dropped_ifb_virt_set_f() | + fb_mmu_fault_status_dropped_other_phys_set_f()| + fb_mmu_fault_status_dropped_other_virt_set_f(); + + if (fault_status & dropped_faults) { + nvgpu_err(g, "dropped mmu fault (0x%08x)", + fault_status & dropped_faults); + gk20a_writel(g, fb_mmu_fault_status_r(), dropped_faults); + } +} + + +static void gv11b_fb_handle_mmu_fault(struct gk20a *g, u32 niso_intr) +{ + u32 fault_status = gk20a_readl(g, fb_mmu_fault_status_r()); + + nvgpu_log(g, gpu_dbg_intr, "mmu_fault_status = 0x%08x", fault_status); + + if (niso_intr & + fb_niso_intr_mmu_other_fault_notify_m()) { + + gv11b_fb_handle_dropped_mmu_fault(g, fault_status); + + gv11b_fb_handle_other_fault_notify(g, fault_status); + } + + if (gv11b_fb_is_fault_buf_enabled(g, NONREPLAY_REG_INDEX)) { + + if (niso_intr & + fb_niso_intr_mmu_nonreplayable_fault_notify_m()) { 
+ + gv11b_fb_handle_mmu_nonreplay_replay_fault(g, + fault_status, NONREPLAY_REG_INDEX); + + /* + * When all the faults are processed, + * GET and PUT will have same value and mmu fault status + * bit will be reset by HW + */ + } + if (niso_intr & + fb_niso_intr_mmu_nonreplayable_fault_overflow_m()) { + + gv11b_fb_handle_nonreplay_fault_overflow(g, + fault_status); + } + + } + + if (gv11b_fb_is_fault_buf_enabled(g, REPLAY_REG_INDEX)) { + + if (niso_intr & + fb_niso_intr_mmu_replayable_fault_notify_m()) { + + gv11b_fb_handle_mmu_nonreplay_replay_fault(g, + fault_status, REPLAY_REG_INDEX); + } + if (niso_intr & + fb_niso_intr_mmu_replayable_fault_overflow_m()) { + + gv11b_fb_handle_replay_fault_overflow(g, + fault_status); + } + + } + + nvgpu_log(g, gpu_dbg_intr, "clear mmu fault status"); + gk20a_writel(g, fb_mmu_fault_status_r(), + fb_mmu_fault_status_valid_clear_f()); +} + +void gv11b_fb_hub_isr(struct gk20a *g) +{ + u32 status, niso_intr; + + nvgpu_mutex_acquire(&g->mm.hub_isr_mutex); + + niso_intr = gk20a_readl(g, fb_niso_intr_r()); + + nvgpu_info(g, "enter hub isr, niso_intr = 0x%08x", niso_intr); + + if (niso_intr & + (fb_niso_intr_hub_access_counter_notify_m() | + fb_niso_intr_hub_access_counter_error_m())) { + + nvgpu_info(g, "hub access counter notify/error"); + } + if (niso_intr & + fb_niso_intr_mmu_ecc_uncorrected_error_notify_pending_f()) { + + nvgpu_info(g, "ecc uncorrected error notify"); + + /* disable interrupts during handling */ + gv11b_fb_disable_hub_intr(g, STALL_REG_INDEX, + HUB_INTR_TYPE_ECC_UNCORRECTED); + + status = gk20a_readl(g, fb_mmu_l2tlb_ecc_status_r()); + if (status) + gv11b_handle_l2tlb_ecc_isr(g, status); + + status = gk20a_readl(g, fb_mmu_hubtlb_ecc_status_r()); + if (status) + gv11b_handle_hubtlb_ecc_isr(g, status); + + status = gk20a_readl(g, fb_mmu_fillunit_ecc_status_r()); + if (status) + gv11b_handle_fillunit_ecc_isr(g, status); + + /* re-enable interrupts after handling */ + gv11b_fb_enable_hub_intr(g, STALL_REG_INDEX, + 
HUB_INTR_TYPE_ECC_UNCORRECTED); + + } + if (niso_intr & + (fb_niso_intr_mmu_other_fault_notify_m() | + fb_niso_intr_mmu_replayable_fault_notify_m() | + fb_niso_intr_mmu_replayable_fault_overflow_m() | + fb_niso_intr_mmu_nonreplayable_fault_notify_m() | + fb_niso_intr_mmu_nonreplayable_fault_overflow_m())) { + + nvgpu_info(g, "MMU Fault"); + gv11b_fb_handle_mmu_fault(g, niso_intr); + } + + nvgpu_mutex_release(&g->mm.hub_isr_mutex); +} + +bool gv11b_fb_mmu_fault_pending(struct gk20a *g) +{ + if (gk20a_readl(g, fb_niso_intr_r()) & + (fb_niso_intr_mmu_other_fault_notify_m() | + fb_niso_intr_mmu_ecc_uncorrected_error_notify_m() | + fb_niso_intr_mmu_replayable_fault_notify_m() | + fb_niso_intr_mmu_replayable_fault_overflow_m() | + fb_niso_intr_mmu_nonreplayable_fault_notify_m() | + fb_niso_intr_mmu_nonreplayable_fault_overflow_m())) + return true; + + return false; +} + +static int gv11b_fb_mmu_invalidate_replay(struct gk20a *g, + u32 invalidate_replay_val) +{ + int err = -ETIMEDOUT; + u32 reg_val; + struct nvgpu_timeout timeout; + + gk20a_dbg_fn(""); + + nvgpu_mutex_acquire(&g->mm.tlb_lock); + + reg_val = gk20a_readl(g, fb_mmu_invalidate_r()); + + reg_val |= fb_mmu_invalidate_all_va_true_f() | + fb_mmu_invalidate_all_pdb_true_f() | + invalidate_replay_val | + fb_mmu_invalidate_trigger_true_f(); + + gk20a_writel(g, fb_mmu_invalidate_r(), reg_val); + + /* retry 200 times */ + nvgpu_timeout_init(g, &timeout, 200, NVGPU_TIMER_RETRY_TIMER); + do { + reg_val = gk20a_readl(g, fb_mmu_ctrl_r()); + if (fb_mmu_ctrl_pri_fifo_empty_v(reg_val) != + fb_mmu_ctrl_pri_fifo_empty_false_f()) { + err = 0; + break; + } + nvgpu_udelay(5); + } while (!nvgpu_timeout_expired_msg(&timeout, + "invalidate replay failed on 0x%llx")); + if (err) + nvgpu_err(g, "invalidate replay timedout"); + + nvgpu_mutex_release(&g->mm.tlb_lock); + + return err; +} + +static int gv11b_fb_fix_page_fault(struct gk20a *g, + struct mmu_fault_info *mmfault) +{ + int err = 0; + u32 pte[2]; + + if (mmfault->refch == NULL) 
{ + nvgpu_log(g, gpu_dbg_intr, "refch from mmu_fault_info is NULL"); + return -EINVAL; + } + + err = __nvgpu_get_pte(g, + mmfault->refch->vm, mmfault->fault_addr, &pte[0]); + if (err) { + nvgpu_log(g, gpu_dbg_intr | gpu_dbg_pte, "pte not found"); + return err; + } + nvgpu_log(g, gpu_dbg_intr | gpu_dbg_pte, + "pte: %#08x %#08x", pte[1], pte[0]); + + if (pte[0] == 0x0 && pte[1] == 0x0) { + nvgpu_log(g, gpu_dbg_intr | gpu_dbg_pte, + "pte all zeros, do not set valid"); + return -1; + } + if (pte[0] & gmmu_new_pte_valid_true_f()) { + nvgpu_log(g, gpu_dbg_intr | gpu_dbg_pte, + "pte valid already set"); + return -1; + } + + pte[0] |= gmmu_new_pte_valid_true_f(); + if (pte[0] & gmmu_new_pte_read_only_true_f()) + pte[0] &= ~(gmmu_new_pte_read_only_true_f()); + nvgpu_log(g, gpu_dbg_intr | gpu_dbg_pte, + "new pte: %#08x %#08x", pte[1], pte[0]); + + err = __nvgpu_set_pte(g, + mmfault->refch->vm, mmfault->fault_addr, &pte[0]); + if (err) { + nvgpu_log(g, gpu_dbg_intr | gpu_dbg_pte, "pte not fixed"); + return err; + } + /* invalidate tlb so that GMMU does not use old cached translation */ + g->ops.fb.tlb_invalidate(g, mmfault->refch->vm->pdb.mem); + + err = __nvgpu_get_pte(g, + mmfault->refch->vm, mmfault->fault_addr, &pte[0]); + nvgpu_log(g, gpu_dbg_intr | gpu_dbg_pte, + "pte after tlb invalidate: %#08x %#08x", + pte[1], pte[0]); + return err; +} diff --git a/drivers/gpu/nvgpu/gv11b/fb_gv11b.h b/drivers/gpu/nvgpu/gv11b/fb_gv11b.h new file mode 100644 index 000000000..d9a6fa771 --- /dev/null +++ b/drivers/gpu/nvgpu/gv11b/fb_gv11b.h @@ -0,0 +1,72 @@ +/* + * GV11B FB + * + * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _NVGPU_GV11B_FB +#define _NVGPU_GV11B_FB + +#define STALL_REG_INDEX 0 +#define NONSTALL_REG_INDEX 1 + +#define NONREPLAY_REG_INDEX 0 +#define REPLAY_REG_INDEX 1 + +#define FAULT_BUF_DISABLED 0 +#define FAULT_BUF_ENABLED 1 + +#define FAULT_BUF_INVALID 0 +#define FAULT_BUF_VALID 1 + +#define HUB_INTR_TYPE_OTHER 1 /* bit 0 */ +#define HUB_INTR_TYPE_NONREPLAY 2 /* bit 1 */ +#define HUB_INTR_TYPE_REPLAY 4 /* bit 2 */ +#define HUB_INTR_TYPE_ECC_UNCORRECTED 8 /* bit 3 */ +#define HUB_INTR_TYPE_ACCESS_COUNTER 16 /* bit 4 */ +#define HUB_INTR_TYPE_ALL (HUB_INTR_TYPE_OTHER | \ + HUB_INTR_TYPE_NONREPLAY | \ + HUB_INTR_TYPE_REPLAY | \ + HUB_INTR_TYPE_ECC_UNCORRECTED | \ + HUB_INTR_TYPE_ACCESS_COUNTER) + +#define FAULT_TYPE_OTHER_AND_NONREPLAY 0 +#define FAULT_TYPE_REPLAY 1 + +struct gk20a; + +void gv11b_fb_init_fs_state(struct gk20a *g); +void gv11b_fb_init_cbc(struct gk20a *g, struct gr_gk20a *gr); +void gv11b_fb_reset(struct gk20a *g); +void gv11b_fb_hub_isr(struct gk20a *g); + +u32 gv11b_fb_is_fault_buf_enabled(struct gk20a *g, + unsigned int index); +void gv11b_fb_fault_buf_set_state_hw(struct gk20a *g, + unsigned int index, unsigned int state); +void gv11b_fb_fault_buf_configure_hw(struct gk20a *g, unsigned int index); +void gv11b_fb_enable_hub_intr(struct gk20a *g, + unsigned int index, unsigned int intr_type); +void gv11b_fb_disable_hub_intr(struct gk20a *g, + unsigned int index, unsigned int intr_type); +bool gv11b_fb_mmu_fault_pending(struct gk20a *g); + +#endif diff --git a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c new file mode 100644 index 000000000..f87c6deaa --- /dev/null +++ b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.c @@ -0,0 +1,1907 @@ +/* + * GV11B fifo + * + * Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "gk20a/gk20a.h" +#include "gk20a/fifo_gk20a.h" +#include "gk20a/channel_gk20a.h" + +#include "gp10b/fifo_gp10b.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "fifo_gv11b.h" +#include "subctx_gv11b.h" +#include "gr_gv11b.h" +#include "mc_gv11b.h" + +#define PBDMA_SUBDEVICE_ID 1 + +static void gv11b_fifo_init_ramfc_eng_method_buffer(struct gk20a *g, + struct channel_gk20a *ch, struct nvgpu_mem *mem); + +void gv11b_get_tsg_runlist_entry(struct tsg_gk20a *tsg, u32 *runlist) +{ + + u32 runlist_entry_0 = ram_rl_entry_type_tsg_v(); + + if (tsg->timeslice_timeout) + runlist_entry_0 |= + ram_rl_entry_tsg_timeslice_scale_f(tsg->timeslice_scale) | + ram_rl_entry_tsg_timeslice_timeout_f(tsg->timeslice_timeout); + else + runlist_entry_0 |= + ram_rl_entry_tsg_timeslice_scale_f( + ram_rl_entry_tsg_timeslice_scale_3_v()) | + ram_rl_entry_tsg_timeslice_timeout_f( + ram_rl_entry_tsg_timeslice_timeout_128_v()); + + runlist[0] = runlist_entry_0; + runlist[1] = ram_rl_entry_tsg_length_f(tsg->num_active_channels); + runlist[2] = ram_rl_entry_tsg_tsgid_f(tsg->tsgid); + runlist[3] = 0; + + gk20a_dbg_info("gv11b tsg runlist [0] %x [1] %x [2] %x [3] %x\n", + runlist[0], runlist[1], runlist[2], runlist[3]); + +} + +void gv11b_get_ch_runlist_entry(struct channel_gk20a *c, u32 *runlist) +{ + struct gk20a *g = c->g; + u32 addr_lo, addr_hi; + u32 runlist_entry; + + /* Time being use 0 pbdma sequencer */ + runlist_entry = ram_rl_entry_type_channel_v() | + ram_rl_entry_chan_runqueue_selector_f( + c->t19x.runqueue_sel) | + ram_rl_entry_chan_userd_target_f( + nvgpu_aperture_mask(g, &g->fifo.userd, + ram_rl_entry_chan_userd_target_sys_mem_ncoh_v(), + ram_rl_entry_chan_userd_target_vid_mem_v())) | + ram_rl_entry_chan_inst_target_f( + nvgpu_aperture_mask(g, &c->inst_block, + 
ram_rl_entry_chan_inst_target_sys_mem_ncoh_v(), + ram_rl_entry_chan_inst_target_vid_mem_v())); + + addr_lo = u64_lo32(c->userd_iova) >> + ram_rl_entry_chan_userd_ptr_align_shift_v(); + addr_hi = u64_hi32(c->userd_iova); + runlist[0] = runlist_entry | ram_rl_entry_chan_userd_ptr_lo_f(addr_lo); + runlist[1] = ram_rl_entry_chan_userd_ptr_hi_f(addr_hi); + + addr_lo = u64_lo32(nvgpu_inst_block_addr(g, &c->inst_block)) >> + ram_rl_entry_chan_inst_ptr_align_shift_v(); + addr_hi = u64_hi32(nvgpu_inst_block_addr(g, &c->inst_block)); + + runlist[2] = ram_rl_entry_chan_inst_ptr_lo_f(addr_lo) | + ram_rl_entry_chid_f(c->chid); + runlist[3] = ram_rl_entry_chan_inst_ptr_hi_f(addr_hi); + + gk20a_dbg_info("gv11b channel runlist [0] %x [1] %x [2] %x [3] %x\n", + runlist[0], runlist[1], runlist[2], runlist[3]); +} + +static void gv11b_userd_writeback_config(struct gk20a *g) +{ + gk20a_writel(g, fifo_userd_writeback_r(), fifo_userd_writeback_timer_f( + fifo_userd_writeback_timer_100us_v())); + + +} + +int channel_gv11b_setup_ramfc(struct channel_gk20a *c, + u64 gpfifo_base, u32 gpfifo_entries, + unsigned long acquire_timeout, u32 flags) +{ + struct gk20a *g = c->g; + struct nvgpu_mem *mem = &c->inst_block; + u32 data; + + gk20a_dbg_fn(""); + + nvgpu_memset(g, mem, 0, 0, ram_fc_size_val_v()); + + nvgpu_mem_wr32(g, mem, ram_fc_gp_base_w(), + pbdma_gp_base_offset_f( + u64_lo32(gpfifo_base >> pbdma_gp_base_rsvd_s()))); + + nvgpu_mem_wr32(g, mem, ram_fc_gp_base_hi_w(), + pbdma_gp_base_hi_offset_f(u64_hi32(gpfifo_base)) | + pbdma_gp_base_hi_limit2_f(ilog2(gpfifo_entries))); + + nvgpu_mem_wr32(g, mem, ram_fc_signature_w(), + c->g->ops.fifo.get_pbdma_signature(c->g)); + + nvgpu_mem_wr32(g, mem, ram_fc_pb_header_w(), + pbdma_pb_header_priv_user_f() | + pbdma_pb_header_method_zero_f() | + pbdma_pb_header_subchannel_zero_f() | + pbdma_pb_header_level_main_f() | + pbdma_pb_header_first_true_f() | + pbdma_pb_header_type_inc_f()); + + nvgpu_mem_wr32(g, mem, ram_fc_subdevice_w(), + 
pbdma_subdevice_id_f(PBDMA_SUBDEVICE_ID) | + pbdma_subdevice_status_active_f() | + pbdma_subdevice_channel_dma_enable_f()); + + nvgpu_mem_wr32(g, mem, ram_fc_target_w(), + pbdma_target_eng_ctx_valid_true_f() | + pbdma_target_ce_ctx_valid_true_f() | + pbdma_target_engine_sw_f()); + + nvgpu_mem_wr32(g, mem, ram_fc_acquire_w(), + g->ops.fifo.pbdma_acquire_val(acquire_timeout)); + + nvgpu_mem_wr32(g, mem, ram_fc_runlist_timeslice_w(), + pbdma_runlist_timeslice_timeout_128_f() | + pbdma_runlist_timeslice_timescale_3_f() | + pbdma_runlist_timeslice_enable_true_f()); + + + nvgpu_mem_wr32(g, mem, ram_fc_chid_w(), ram_fc_chid_id_f(c->chid)); + + if (c->t19x.subctx_id == CHANNEL_INFO_VEID0) + nvgpu_mem_wr32(g, mem, ram_fc_set_channel_info_w(), + pbdma_set_channel_info_scg_type_graphics_compute0_f() | + pbdma_set_channel_info_veid_f(c->t19x.subctx_id)); + else + nvgpu_mem_wr32(g, mem, ram_fc_set_channel_info_w(), + pbdma_set_channel_info_scg_type_compute1_f() | + pbdma_set_channel_info_veid_f(c->t19x.subctx_id)); + + gv11b_fifo_init_ramfc_eng_method_buffer(g, c, mem); + + if (c->is_privileged_channel) { + /* Set privilege level for channel */ + nvgpu_mem_wr32(g, mem, ram_fc_config_w(), + pbdma_config_auth_level_privileged_f()); + + gk20a_fifo_setup_ramfc_for_privileged_channel(c); + } + + /* Enable userd writeback */ + data = nvgpu_mem_rd32(g, mem, ram_fc_config_w()); + data = data | pbdma_config_userd_writeback_enable_f(); + nvgpu_mem_wr32(g, mem, ram_fc_config_w(),data); + + gv11b_userd_writeback_config(g); + + return channel_gp10b_commit_userd(c); +} + + +static void gv11b_ring_channel_doorbell(struct channel_gk20a *c) +{ + struct fifo_gk20a *f = &c->g->fifo; + u32 hw_chid = f->channel_base + c->chid; + + gk20a_dbg_info("channel ring door bell %d\n", c->chid); + + gv11b_usermode_writel(c->g, usermode_notify_channel_pending_r(), + usermode_notify_channel_pending_id_f(hw_chid)); +} + +u32 gv11b_userd_gp_get(struct gk20a *g, struct channel_gk20a *c) +{ + struct nvgpu_mem 
*userd_mem = &g->fifo.userd; + u32 offset = c->chid * (g->fifo.userd_entry_size / sizeof(u32)); + + return nvgpu_mem_rd32(g, userd_mem, + offset + ram_userd_gp_get_w()); +} + +u64 gv11b_userd_pb_get(struct gk20a *g, struct channel_gk20a *c) +{ + struct nvgpu_mem *userd_mem = &g->fifo.userd; + u32 offset = c->chid * (g->fifo.userd_entry_size / sizeof(u32)); + u32 lo = nvgpu_mem_rd32(g, userd_mem, offset + ram_userd_get_w()); + u32 hi = nvgpu_mem_rd32(g, userd_mem, offset + ram_userd_get_hi_w()); + + return ((u64)hi << 32) | lo; +} + +void gv11b_userd_gp_put(struct gk20a *g, struct channel_gk20a *c) +{ + struct nvgpu_mem *userd_mem = &g->fifo.userd; + u32 offset = c->chid * (g->fifo.userd_entry_size / sizeof(u32)); + + nvgpu_mem_wr32(g, userd_mem, offset + ram_userd_gp_put_w(), + c->gpfifo.put); + /* commit everything to cpu */ + nvgpu_smp_mb(); + + gv11b_ring_channel_doorbell(c); +} + +void channel_gv11b_unbind(struct channel_gk20a *ch) +{ + struct gk20a *g = ch->g; + + gk20a_dbg_fn(""); + + if (nvgpu_atomic_cmpxchg(&ch->bound, true, false)) { + gk20a_writel(g, ccsr_channel_inst_r(ch->chid), + ccsr_channel_inst_ptr_f(0) | + ccsr_channel_inst_bind_false_f()); + + gk20a_writel(g, ccsr_channel_r(ch->chid), + ccsr_channel_enable_clr_true_f() | + ccsr_channel_pbdma_faulted_reset_f() | + ccsr_channel_eng_faulted_reset_f()); + } +} + +u32 gv11b_fifo_get_num_fifos(struct gk20a *g) +{ + return ccsr_channel__size_1_v(); +} + +bool gv11b_is_fault_engine_subid_gpc(struct gk20a *g, u32 engine_subid) +{ + return (engine_subid == gmmu_fault_client_type_gpc_v()); +} + +void gv11b_dump_channel_status_ramfc(struct gk20a *g, + struct gk20a_debug_output *o, + u32 chid, + struct ch_state *ch_state) +{ + u32 channel = gk20a_readl(g, ccsr_channel_r(chid)); + u32 status = ccsr_channel_status_v(channel); + u32 *inst_mem; + struct channel_gk20a *c = g->fifo.channel + chid; + struct nvgpu_semaphore_int *hw_sema = NULL; + + if (c->hw_sema) + hw_sema = c->hw_sema; + + if (!ch_state) + return; + 
+ inst_mem = &ch_state->inst_block[0]; + + gk20a_debug_output(o, "%d-%s, pid %d, refs: %d: ", chid, + g->name, + ch_state->pid, + ch_state->refs); + gk20a_debug_output(o, "channel status: %s in use %s %s\n", + ccsr_channel_enable_v(channel) ? "" : "not", + gk20a_decode_ccsr_chan_status(status), + ccsr_channel_busy_v(channel) ? "busy" : "not busy"); + gk20a_debug_output(o, "RAMFC : TOP: %016llx PUT: %016llx GET: %016llx " + "FETCH: %016llx\nHEADER: %08x COUNT: %08x\n" + "SEMAPHORE: addr hi: %08x addr lo: %08x\n" + "payload %08x execute %08x\n", + (u64)inst_mem[ram_fc_pb_top_level_get_w()] + + ((u64)inst_mem[ram_fc_pb_top_level_get_hi_w()] << 32ULL), + (u64)inst_mem[ram_fc_pb_put_w()] + + ((u64)inst_mem[ram_fc_pb_put_hi_w()] << 32ULL), + (u64)inst_mem[ram_fc_pb_get_w()] + + ((u64)inst_mem[ram_fc_pb_get_hi_w()] << 32ULL), + (u64)inst_mem[ram_fc_pb_fetch_w()] + + ((u64)inst_mem[ram_fc_pb_fetch_hi_w()] << 32ULL), + inst_mem[ram_fc_pb_header_w()], + inst_mem[ram_fc_pb_count_w()], + inst_mem[ram_fc_sem_addr_hi_w()], + inst_mem[ram_fc_sem_addr_lo_w()], + inst_mem[ram_fc_sem_payload_lo_w()], + inst_mem[ram_fc_sem_execute_w()]); + if (hw_sema) + gk20a_debug_output(o, "SEMA STATE: value: 0x%08x next_val: 0x%08x addr: 0x%010llx\n", + __nvgpu_semaphore_read(hw_sema), + nvgpu_atomic_read(&hw_sema->next_value), + nvgpu_hw_sema_addr(hw_sema)); + gk20a_debug_output(o, "\n"); +} + +void gv11b_dump_eng_status(struct gk20a *g, + struct gk20a_debug_output *o) +{ + u32 i, host_num_engines; + + host_num_engines = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_ENGINES); + + for (i = 0; i < host_num_engines; i++) { + u32 status = gk20a_readl(g, fifo_engine_status_r(i)); + u32 ctx_status = fifo_engine_status_ctx_status_v(status); + + gk20a_debug_output(o, "%s eng %d: ", g->name, i); + gk20a_debug_output(o, + "id: %d (%s), next_id: %d (%s), ctx status: %s ", + fifo_engine_status_id_v(status), + fifo_engine_status_id_type_v(status) ? 
+ "tsg" : "channel", + fifo_engine_status_next_id_v(status), + fifo_engine_status_next_id_type_v(status) ? + "tsg" : "channel", + gk20a_decode_pbdma_chan_eng_ctx_status(ctx_status)); + + if (fifo_engine_status_eng_reload_v(status)) + gk20a_debug_output(o, "ctx_reload "); + if (fifo_engine_status_faulted_v(status)) + gk20a_debug_output(o, "faulted "); + if (fifo_engine_status_engine_v(status)) + gk20a_debug_output(o, "busy "); + gk20a_debug_output(o, "\n"); + } + gk20a_debug_output(o, "\n"); +} + +u32 gv11b_fifo_intr_0_error_mask(struct gk20a *g) +{ + u32 intr_0_error_mask = + fifo_intr_0_bind_error_pending_f() | + fifo_intr_0_sched_error_pending_f() | + fifo_intr_0_chsw_error_pending_f() | + fifo_intr_0_fb_flush_timeout_pending_f() | + fifo_intr_0_lb_error_pending_f(); + + return intr_0_error_mask; +} + +u32 gv11b_fifo_get_preempt_timeout(struct gk20a *g) +{ + return gk20a_get_gr_idle_timeout(g); +} + +static int gv11b_fifo_poll_pbdma_chan_status(struct gk20a *g, u32 id, + u32 pbdma_id, unsigned int timeout_rc_type) +{ + struct nvgpu_timeout timeout; + unsigned long delay = GR_IDLE_CHECK_DEFAULT; /* in micro seconds */ + u32 pbdma_stat; + u32 chan_stat; + int ret = -EBUSY; + + /* timeout in milli seconds */ + nvgpu_timeout_init(g, &timeout, g->ops.fifo.get_preempt_timeout(g), + NVGPU_TIMER_CPU_TIMER); + + nvgpu_log(g, gpu_dbg_info, "wait preempt pbdma %d", pbdma_id); + /* Verify that ch/tsg is no longer on the pbdma */ + do { + /* + * If the PBDMA has a stalling interrupt and receives a NACK, + * the PBDMA won't save out until the STALLING interrupt is + * cleared. Stalling interrupt need not be directly addressed, + * as simply clearing of the interrupt bit will be sufficient + * to allow the PBDMA to save out. If the stalling interrupt + * was due to a SW method or another deterministic failure, + * the PBDMA will assert it when the channel is reloaded + * or resumed. Note that the fault will still be + * reported to SW. 
+ */ + + gk20a_fifo_handle_pbdma_intr(g, &g->fifo, pbdma_id, RC_NO); + + pbdma_stat = gk20a_readl(g, fifo_pbdma_status_r(pbdma_id)); + chan_stat = fifo_pbdma_status_chan_status_v(pbdma_stat); + + if (chan_stat == + fifo_pbdma_status_chan_status_valid_v() || + chan_stat == + fifo_pbdma_status_chan_status_chsw_save_v()) { + + if (id != fifo_pbdma_status_id_v(pbdma_stat)) { + ret = 0; + break; + } + + } else if (chan_stat == + fifo_pbdma_status_chan_status_chsw_load_v()) { + + if (id != fifo_pbdma_status_next_id_v(pbdma_stat)) { + ret = 0; + break; + } + + } else if (chan_stat == + fifo_pbdma_status_chan_status_chsw_switch_v()) { + + if ((id != fifo_pbdma_status_next_id_v(pbdma_stat)) && + (id != fifo_pbdma_status_id_v(pbdma_stat))) { + ret = 0; + break; + } + } else { + /* pbdma status is invalid i.e. it is not loaded */ + ret = 0; + break; + } + + usleep_range(delay, delay * 2); + delay = min_t(unsigned long, + delay << 1, GR_IDLE_CHECK_MAX); + } while (!nvgpu_timeout_expired_msg(&timeout, + "preempt timeout pbdma")); + return ret; +} + +static int gv11b_fifo_poll_eng_ctx_status(struct gk20a *g, u32 id, + u32 act_eng_id, u32 *reset_eng_bitmask, + unsigned int timeout_rc_type) +{ + struct nvgpu_timeout timeout; + unsigned long delay = GR_IDLE_CHECK_DEFAULT; /* in micro seconds */ + u32 eng_stat; + u32 ctx_stat; + int ret = -EBUSY; + bool stall_intr = false; + + /* timeout in milli seconds */ + nvgpu_timeout_init(g, &timeout, g->ops.fifo.get_preempt_timeout(g), + NVGPU_TIMER_CPU_TIMER); + + nvgpu_log(g, gpu_dbg_info, "wait preempt act engine id: %u", + act_eng_id); + /* Check if ch/tsg has saved off the engine or if ctxsw is hung */ + do { + eng_stat = gk20a_readl(g, fifo_engine_status_r(act_eng_id)); + ctx_stat = fifo_engine_status_ctx_status_v(eng_stat); + + if (gv11b_mc_is_stall_and_eng_intr_pending(g, act_eng_id)) { + stall_intr = true; + nvgpu_log(g, gpu_dbg_info | gpu_dbg_intr, + "stall intr set, " + "preemption will not finish"); + } + if (ctx_stat == + 
fifo_engine_status_ctx_status_ctxsw_switch_v()) { + /* Eng save hasn't started yet. Continue polling */ + + } else if (ctx_stat == + fifo_engine_status_ctx_status_valid_v() || + ctx_stat == + fifo_engine_status_ctx_status_ctxsw_save_v()) { + + if (id == fifo_engine_status_id_v(eng_stat)) { + if (stall_intr || + timeout_rc_type == PREEMPT_TIMEOUT_NORC) { + /* preemption will not finish */ + *reset_eng_bitmask |= BIT(act_eng_id); + ret = 0; + break; + } + } else { + /* context is not running on the engine */ + ret = 0; + break; + } + + } else if (ctx_stat == + fifo_engine_status_ctx_status_ctxsw_load_v()) { + + if (id == fifo_engine_status_next_id_v(eng_stat)) { + + if (stall_intr || + timeout_rc_type == PREEMPT_TIMEOUT_NORC) { + /* preemption will not finish */ + *reset_eng_bitmask |= BIT(act_eng_id); + ret = 0; + break; + } + } else { + /* context is not running on the engine */ + ret = 0; + break; + } + + } else { + /* Preempt should be finished */ + ret = 0; + break; + } + nvgpu_usleep_range(delay, delay * 2); + delay = min_t(unsigned long, + delay << 1, GR_IDLE_CHECK_MAX); + } while (!nvgpu_timeout_expired_msg(&timeout, + "preempt timeout eng")); + return ret; +} + +static void gv11b_reset_eng_faulted_ch(struct gk20a *g, u32 chid) +{ + u32 reg_val; + + reg_val = gk20a_readl(g, ccsr_channel_r(chid)); + reg_val |= ccsr_channel_eng_faulted_reset_f(); + gk20a_writel(g, ccsr_channel_r(chid), reg_val); +} + +static void gv11b_reset_eng_faulted_tsg(struct tsg_gk20a *tsg) +{ + struct gk20a *g = tsg->g; + struct channel_gk20a *ch; + + nvgpu_rwsem_down_read(&tsg->ch_list_lock); + list_for_each_entry(ch, &tsg->ch_list, ch_entry) { + gv11b_reset_eng_faulted_ch(g, ch->chid); + } + nvgpu_rwsem_up_read(&tsg->ch_list_lock); +} + +static void gv11b_reset_pbdma_faulted_ch(struct gk20a *g, u32 chid) +{ + u32 reg_val; + + reg_val = gk20a_readl(g, ccsr_channel_r(chid)); + reg_val |= ccsr_channel_pbdma_faulted_reset_f(); + gk20a_writel(g, ccsr_channel_r(chid), reg_val); +} + +static 
void gv11b_reset_pbdma_faulted_tsg(struct tsg_gk20a *tsg) +{ + struct gk20a *g = tsg->g; + struct channel_gk20a *ch; + + nvgpu_rwsem_down_read(&tsg->ch_list_lock); + list_for_each_entry(ch, &tsg->ch_list, ch_entry) { + gv11b_reset_pbdma_faulted_ch(g, ch->chid); + } + nvgpu_rwsem_up_read(&tsg->ch_list_lock); +} + +void gv11b_fifo_reset_pbdma_and_eng_faulted(struct gk20a *g, + struct channel_gk20a *refch, + u32 faulted_pbdma, u32 faulted_engine) +{ + struct tsg_gk20a *tsg; + + nvgpu_log(g, gpu_dbg_intr, "reset faulted pbdma:0x%x eng:0x%x", + faulted_pbdma, faulted_engine); + + if (gk20a_is_channel_marked_as_tsg(refch)) { + tsg = &g->fifo.tsg[refch->tsgid]; + if (faulted_pbdma != FIFO_INVAL_PBDMA_ID) + gv11b_reset_pbdma_faulted_tsg(tsg); + if (faulted_engine != FIFO_INVAL_ENGINE_ID) + gv11b_reset_eng_faulted_tsg(tsg); + } else { + if (faulted_pbdma != FIFO_INVAL_PBDMA_ID) + gv11b_reset_pbdma_faulted_ch(g, refch->chid); + if (faulted_engine != FIFO_INVAL_ENGINE_ID) + gv11b_reset_eng_faulted_ch(g, refch->chid); + } +} + +static u32 gv11b_fifo_get_runlists_mask(struct gk20a *g, u32 act_eng_bitmask, + u32 id, unsigned int id_type, unsigned int rc_type, + struct mmu_fault_info *mmfault) +{ + u32 runlists_mask = 0; + struct fifo_gk20a *f = &g->fifo; + struct fifo_runlist_info_gk20a *runlist; + u32 pbdma_bitmask = 0; + + if (id_type != ID_TYPE_UNKNOWN) { + if (id_type == ID_TYPE_TSG) + runlists_mask |= fifo_sched_disable_runlist_m( + f->tsg[id].runlist_id); + else + runlists_mask |= fifo_sched_disable_runlist_m( + f->channel[id].runlist_id); + } + + if (rc_type == RC_TYPE_MMU_FAULT && mmfault) { + if (mmfault->faulted_pbdma != FIFO_INVAL_PBDMA_ID) + pbdma_bitmask = BIT(mmfault->faulted_pbdma); + + for (id = 0; id < f->max_runlists; id++) { + + runlist = &f->runlist_info[id]; + + if (runlist->eng_bitmask & act_eng_bitmask) + runlists_mask |= + fifo_sched_disable_runlist_m(id); + + if (runlist->pbdma_bitmask & pbdma_bitmask) + runlists_mask |= + 
fifo_sched_disable_runlist_m(id); + } + } + + if (id_type == ID_TYPE_UNKNOWN) { + for (id = 0; id < f->max_runlists; id++) { + if (act_eng_bitmask) { + /* eng ids are known */ + runlist = &f->runlist_info[id]; + if (runlist->eng_bitmask & act_eng_bitmask) + runlists_mask |= + fifo_sched_disable_runlist_m(id); + } else { + runlists_mask |= + fifo_sched_disable_runlist_m(id); + } + } + } + gk20a_dbg_info("runlists_mask = %08x", runlists_mask); + return runlists_mask; +} + +static void gv11b_fifo_runlist_event_intr_disable(struct gk20a *g) +{ + u32 reg_val; + + reg_val = gk20a_readl(g, fifo_intr_en_0_r()); + reg_val &= fifo_intr_0_runlist_event_pending_f(); + gk20a_writel(g, fifo_intr_en_0_r(), reg_val); +} + +static void gv11b_fifo_runlist_event_intr_enable(struct gk20a *g) +{ + u32 reg_val; + + reg_val = gk20a_readl(g, fifo_intr_en_0_r()); + reg_val |= fifo_intr_0_runlist_event_pending_f(); + gk20a_writel(g, fifo_intr_en_0_r(), reg_val); +} + +static void gv11b_fifo_issue_runlist_preempt(struct gk20a *g, + u32 runlists_mask) +{ + u32 reg_val; + + /* issue runlist preempt */ + reg_val = gk20a_readl(g, fifo_runlist_preempt_r()); + reg_val |= runlists_mask; + gk20a_writel(g, fifo_runlist_preempt_r(), reg_val); +} + +static int gv11b_fifo_poll_runlist_preempt_pending(struct gk20a *g, + u32 runlists_mask) +{ + struct nvgpu_timeout timeout; + u32 delay = GR_IDLE_CHECK_DEFAULT; + int ret = -EBUSY; + + nvgpu_timeout_init(g, &timeout, g->ops.fifo.get_preempt_timeout(g), + NVGPU_TIMER_CPU_TIMER); + do { + if (!((gk20a_readl(g, fifo_runlist_preempt_r())) & + runlists_mask)) { + ret = 0; + break; + } + + nvgpu_usleep_range(delay, delay * 2); + delay = min_t(unsigned long, + delay << 1, GR_IDLE_CHECK_MAX); + } while (!nvgpu_timeout_expired_msg(&timeout, + "runlist preempt timeout")); + return ret; +} + +int gv11b_fifo_is_preempt_pending(struct gk20a *g, u32 id, + unsigned int id_type, unsigned int timeout_rc_type) +{ + struct fifo_gk20a *f = &g->fifo; + unsigned long 
runlist_served_pbdmas; + unsigned long runlist_served_engines; + u32 pbdma_id; + u32 act_eng_id; + u32 runlist_id; + int func_ret; + int ret = 0; + u32 tsgid; + + if (id_type == ID_TYPE_TSG) { + runlist_id = f->tsg[id].runlist_id; + tsgid = id; + } else { + runlist_id = f->channel[id].runlist_id; + tsgid = f->channel[id].tsgid; + } + + nvgpu_log_info(g, "Check preempt pending for tsgid = %u", tsgid); + + runlist_served_pbdmas = f->runlist_info[runlist_id].pbdma_bitmask; + runlist_served_engines = f->runlist_info[runlist_id].eng_bitmask; + + for_each_set_bit(pbdma_id, &runlist_served_pbdmas, f->num_pbdma) { + + func_ret = gv11b_fifo_poll_pbdma_chan_status(g, tsgid, pbdma_id, + timeout_rc_type); + if (func_ret != 0) { + gk20a_dbg_info("preempt timeout pbdma %d", pbdma_id); + ret |= func_ret; + } + } + + f->runlist_info[runlist_id].reset_eng_bitmask = 0; + + for_each_set_bit(act_eng_id, &runlist_served_engines, f->max_engines) { + + func_ret = gv11b_fifo_poll_eng_ctx_status(g, tsgid, act_eng_id, + &f->runlist_info[runlist_id].reset_eng_bitmask, + timeout_rc_type); + + if (func_ret != 0) { + gk20a_dbg_info("preempt timeout engine %d", act_eng_id); + ret |= func_ret; + } + } + + return ret; +} + +int gv11b_fifo_preempt_channel(struct gk20a *g, u32 chid) +{ + struct fifo_gk20a *f = &g->fifo; + u32 tsgid; + + tsgid = f->channel[chid].tsgid; + nvgpu_log_info(g, "chid:%d tsgid:%d", chid, tsgid); + + /* Preempt tsg. 
Channel preempt is NOOP */ + return g->ops.fifo.preempt_tsg(g, tsgid); +} + +static int __locked_fifo_preempt_runlists(struct gk20a *g, u32 runlists_mask) +{ + int ret; + + /* + * Disable runlist event interrupt as it will get + * triggered after runlist preempt finishes + */ + gv11b_fifo_runlist_event_intr_disable(g); + + /* issue runlist preempt */ + gv11b_fifo_issue_runlist_preempt(g, runlists_mask); + + /* poll for runlist preempt done */ + ret = gv11b_fifo_poll_runlist_preempt_pending(g, runlists_mask); + + /* Clear outstanding runlist event */ + gk20a_fifo_handle_runlist_event(g); + + /* Enable runlist event interrupt*/ + gv11b_fifo_runlist_event_intr_enable(g); + + return ret; +} + +/* TSG enable sequence applicable for Volta and onwards */ +int gv11b_fifo_enable_tsg(struct tsg_gk20a *tsg) +{ + struct gk20a *g = tsg->g; + struct channel_gk20a *ch; + + nvgpu_rwsem_down_read(&tsg->ch_list_lock); + nvgpu_list_for_each_entry(ch, &tsg->ch_list, channel_gk20a, ch_entry) { + g->ops.fifo.enable_channel(ch); + } + nvgpu_rwsem_up_read(&tsg->ch_list_lock); + + return 0; +} + +int gv11b_fifo_preempt_tsg(struct gk20a *g, u32 tsgid) +{ + struct fifo_gk20a *f = &g->fifo; + u32 ret = 0; + u32 token = PMU_INVALID_MUTEX_OWNER_ID; + u32 mutex_ret = 0; + u32 runlist_id; + + gk20a_dbg_fn("%d", tsgid); + + runlist_id = f->tsg[tsgid].runlist_id; + gk20a_dbg_fn("runlist_id %d", runlist_id); + + nvgpu_mutex_acquire(&f->runlist_info[runlist_id].mutex); + + mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token); + + ret = __locked_fifo_preempt(g, tsgid, true); + + if (!mutex_ret) + nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token); + + nvgpu_mutex_release(&f->runlist_info[runlist_id].mutex); + + return ret; +} + + +static int gv11b_fifo_preempt_runlists(struct gk20a *g, u32 runlists_mask) +{ + int ret = 0; + u32 token = PMU_INVALID_MUTEX_OWNER_ID; + u32 mutex_ret = 0; + u32 runlist_id; + + gk20a_dbg_fn(""); + + for (runlist_id = 0; runlist_id < 
g->fifo.max_runlists; runlist_id++) { + if (runlists_mask & fifo_runlist_preempt_runlist_m(runlist_id)) + nvgpu_mutex_acquire(&g->fifo. + runlist_info[runlist_id].mutex); + } + + mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, PMU_MUTEX_ID_FIFO, &token); + + ret = __locked_fifo_preempt_runlists(g, runlists_mask); + + if (!mutex_ret) + nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token); + + for (runlist_id = 0; runlist_id < g->fifo.max_runlists; runlist_id++) { + if (runlists_mask & fifo_runlist_preempt_runlist_m(runlist_id)) + nvgpu_mutex_release(&g->fifo. + runlist_info[runlist_id].mutex); + } + + return ret; +} + +static int __locked_fifo_preempt_ch_tsg(struct gk20a *g, u32 id, + unsigned int id_type, unsigned int timeout_rc_type) +{ + int ret; + struct fifo_gk20a *f = &g->fifo; + + nvgpu_log_fn(g, "id:%d id_type:%d", id, id_type); + + /* Issue tsg preempt. Channel preempt is noop */ + if (id_type == ID_TYPE_CHANNEL) + gk20a_fifo_issue_preempt(g, f->channel[id].tsgid, true); + else + gk20a_fifo_issue_preempt(g, id, true); + + /* wait for preempt */ + ret = g->ops.fifo.is_preempt_pending(g, id, id_type, + timeout_rc_type); + + if (ret && (timeout_rc_type == PREEMPT_TIMEOUT_RC)) + __locked_fifo_preempt_timeout_rc(g, id, id_type); + + return ret; +} + + +int gv11b_fifo_preempt_ch_tsg(struct gk20a *g, u32 id, + unsigned int id_type, unsigned int timeout_rc_type) +{ + struct fifo_gk20a *f = &g->fifo; + u32 ret = 0; + u32 token = PMU_INVALID_MUTEX_OWNER_ID; + u32 mutex_ret = 0; + u32 runlist_id; + + if (id_type == ID_TYPE_TSG) + runlist_id = f->tsg[id].runlist_id; + else if (id_type == ID_TYPE_CHANNEL) + runlist_id = f->channel[id].runlist_id; + else + return -EINVAL; + + if (runlist_id >= g->fifo.max_runlists) { + gk20a_dbg_info("runlist_id = %d", runlist_id); + return -EINVAL; + } + + gk20a_dbg_fn("preempt id = %d, runlist_id = %d", id, runlist_id); + + nvgpu_mutex_acquire(&f->runlist_info[runlist_id].mutex); + + mutex_ret = nvgpu_pmu_mutex_acquire(&g->pmu, 
PMU_MUTEX_ID_FIFO, &token); + + ret = __locked_fifo_preempt_ch_tsg(g, id, id_type, timeout_rc_type); + + if (!mutex_ret) + nvgpu_pmu_mutex_release(&g->pmu, PMU_MUTEX_ID_FIFO, &token); + + nvgpu_mutex_release(&f->runlist_info[runlist_id].mutex); + + return ret; + +} + +void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask, + u32 id, unsigned int id_type, unsigned int rc_type, + struct mmu_fault_info *mmfault) +{ + bool verbose = false; + struct tsg_gk20a *tsg = NULL; + struct channel_gk20a *refch = NULL; + u32 runlists_mask, runlist_id; + struct fifo_runlist_info_gk20a *runlist = NULL; + u32 engine_id, client_type = ~0; + + gk20a_dbg_info("active engine ids bitmask =0x%x", act_eng_bitmask); + gk20a_dbg_info("hw id =%d", id); + gk20a_dbg_info("id_type =%d", id_type); + gk20a_dbg_info("rc_type =%d", rc_type); + gk20a_dbg_info("mmu_fault =0x%p", mmfault); + + runlists_mask = gv11b_fifo_get_runlists_mask(g, act_eng_bitmask, id, + id_type, rc_type, mmfault); + + gk20a_fifo_set_runlist_state(g, runlists_mask, RUNLIST_DISABLED, + !RUNLIST_INFO_MUTEX_LOCKED); + + g->fifo.deferred_reset_pending = false; + + /* Disable power management */ + if (g->support_pmu && g->elpg_enabled) { + if (nvgpu_pmu_disable_elpg(g)) + nvgpu_err(g, "failed to set disable elpg"); + } + if (g->ops.clock_gating.slcg_gr_load_gating_prod) + g->ops.clock_gating.slcg_gr_load_gating_prod(g, + false); + if (g->ops.clock_gating.slcg_perf_load_gating_prod) + g->ops.clock_gating.slcg_perf_load_gating_prod(g, + false); + if (g->ops.clock_gating.slcg_ltc_load_gating_prod) + g->ops.clock_gating.slcg_ltc_load_gating_prod(g, + false); + + gr_gk20a_init_cg_mode(g, ELCG_MODE, ELCG_RUN); + + if (rc_type == RC_TYPE_MMU_FAULT) + gk20a_debug_dump(g); + + /* get the channel/TSG */ + if (rc_type == RC_TYPE_MMU_FAULT && mmfault && mmfault->refch) { + refch = mmfault->refch; + client_type = mmfault->client_type; + if (gk20a_is_channel_marked_as_tsg(refch)) + tsg = &g->fifo.tsg[refch->tsgid]; + 
gv11b_fifo_reset_pbdma_and_eng_faulted(g, refch, + mmfault->faulted_pbdma, + mmfault->faulted_engine); + } else { + if (id_type == ID_TYPE_TSG) + tsg = &g->fifo.tsg[id]; + else if (id_type == ID_TYPE_CHANNEL) + refch = gk20a_channel_get(&g->fifo.channel[id]); + } + + if (id_type == ID_TYPE_TSG || id_type == ID_TYPE_CHANNEL) { + g->ops.fifo.preempt_ch_tsg(g, id, id_type, + PREEMPT_TIMEOUT_NORC); + } else { + gv11b_fifo_preempt_runlists(g, runlists_mask); + } + + if (tsg) { + if (!g->fifo.deferred_reset_pending) { + if (rc_type == RC_TYPE_MMU_FAULT) { + gk20a_fifo_set_ctx_mmu_error_tsg(g, tsg); + verbose = gk20a_fifo_error_tsg(g, tsg); + } + } + gk20a_fifo_abort_tsg(g, tsg->tsgid, false); + if (refch) + gk20a_channel_put(refch); + } else if (refch) { + if (!g->fifo.deferred_reset_pending) { + if (rc_type == RC_TYPE_MMU_FAULT) { + gk20a_fifo_set_ctx_mmu_error_ch(g, refch); + verbose = gk20a_fifo_error_ch(g, refch); + } + } + gk20a_channel_abort(refch, false); + gk20a_channel_put(refch); + } else { + nvgpu_err(g, "id unknown, abort runlist"); + for (runlist_id = 0; runlist_id < g->fifo.max_runlists; + runlist_id++) { + if (runlists_mask & BIT(runlist_id)) + g->ops.fifo.update_runlist(g, runlist_id, + FIFO_INVAL_CHANNEL_ID, false, true); + } + } + + /* check if engine reset should be deferred */ + for (runlist_id = 0; runlist_id < g->fifo.max_runlists; runlist_id++) { + + runlist = &g->fifo.runlist_info[runlist_id]; + if ((runlists_mask & BIT(runlist_id)) && + runlist->reset_eng_bitmask) { + + unsigned long __reset_eng_bitmask = + runlist->reset_eng_bitmask; + + for_each_set_bit(engine_id, &__reset_eng_bitmask, 32) { + if ((refch || tsg) && + gk20a_fifo_should_defer_engine_reset(g, + engine_id, client_type, false)) { + + g->fifo.deferred_fault_engines |= + BIT(engine_id); + + /* handled during channel free */ + g->fifo.deferred_reset_pending = true; + gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg, + "sm debugger attached," + " deferring channel recovery to channel free"); + 
} else { + /* + * if lock is already taken, a reset is + * taking place so no need to repeat + */ + if (nvgpu_mutex_tryacquire( + &g->fifo.gr_reset_mutex)) { + + gk20a_fifo_reset_engine(g, + engine_id); + + nvgpu_mutex_release( + &g->fifo.gr_reset_mutex); + } + } + } + } + } + +#ifdef CONFIG_GK20A_CTXSW_TRACE + if (refch) + gk20a_ctxsw_trace_channel_reset(g, refch); + else if (tsg) + gk20a_ctxsw_trace_tsg_reset(g, tsg); +#endif + + gk20a_fifo_set_runlist_state(g, runlists_mask, RUNLIST_ENABLED, + !RUNLIST_INFO_MUTEX_LOCKED); + + /* It is safe to enable ELPG again. */ + if (g->support_pmu && g->elpg_enabled) + nvgpu_pmu_enable_elpg(g); +} + +void gv11b_fifo_init_pbdma_intr_descs(struct fifo_gk20a *f) +{ + /* + * These are all errors which indicate something really wrong + * going on in the device + */ + f->intr.pbdma.device_fatal_0 = + pbdma_intr_0_memreq_pending_f() | + pbdma_intr_0_memack_timeout_pending_f() | + pbdma_intr_0_memack_extra_pending_f() | + pbdma_intr_0_memdat_timeout_pending_f() | + pbdma_intr_0_memdat_extra_pending_f() | + pbdma_intr_0_memflush_pending_f() | + pbdma_intr_0_memop_pending_f() | + pbdma_intr_0_lbconnect_pending_f() | + pbdma_intr_0_lback_timeout_pending_f() | + pbdma_intr_0_lback_extra_pending_f() | + pbdma_intr_0_lbdat_timeout_pending_f() | + pbdma_intr_0_lbdat_extra_pending_f() | + pbdma_intr_0_pri_pending_f(); + + /* + * These are data parsing, framing errors or others which can be + * recovered from with intervention... 
or just resetting the + * channel + */ + f->intr.pbdma.channel_fatal_0 = + pbdma_intr_0_gpfifo_pending_f() | + pbdma_intr_0_gpptr_pending_f() | + pbdma_intr_0_gpentry_pending_f() | + pbdma_intr_0_gpcrc_pending_f() | + pbdma_intr_0_pbptr_pending_f() | + pbdma_intr_0_pbentry_pending_f() | + pbdma_intr_0_pbcrc_pending_f() | + pbdma_intr_0_method_pending_f() | + pbdma_intr_0_methodcrc_pending_f() | + pbdma_intr_0_pbseg_pending_f() | + pbdma_intr_0_clear_faulted_error_pending_f() | + pbdma_intr_0_eng_reset_pending_f() | + pbdma_intr_0_semaphore_pending_f() | + pbdma_intr_0_signature_pending_f(); + + /* Can be used for sw-methods, or represents a recoverable timeout. */ + f->intr.pbdma.restartable_0 = + pbdma_intr_0_device_pending_f(); +} + +static u32 gv11b_fifo_intr_0_en_mask(struct gk20a *g) +{ + u32 intr_0_en_mask; + + intr_0_en_mask = g->ops.fifo.intr_0_error_mask(g); + + intr_0_en_mask |= fifo_intr_0_runlist_event_pending_f() | + fifo_intr_0_pbdma_intr_pending_f() | + fifo_intr_0_ctxsw_timeout_pending_f(); + + return intr_0_en_mask; +} + +int gv11b_init_fifo_reset_enable_hw(struct gk20a *g) +{ + u32 intr_stall; + u32 mask; + u32 timeout; + unsigned int i; + u32 host_num_pbdma = nvgpu_get_litter_value(g, GPU_LIT_HOST_NUM_PBDMA); + + gk20a_dbg_fn(""); + + /* enable pmc pfifo */ + g->ops.mc.reset(g, mc_enable_pfifo_enabled_f()); + + if (g->ops.clock_gating.slcg_ce2_load_gating_prod) + g->ops.clock_gating.slcg_ce2_load_gating_prod(g, + g->slcg_enabled); + if (g->ops.clock_gating.slcg_fifo_load_gating_prod) + g->ops.clock_gating.slcg_fifo_load_gating_prod(g, + g->slcg_enabled); + if (g->ops.clock_gating.blcg_fifo_load_gating_prod) + g->ops.clock_gating.blcg_fifo_load_gating_prod(g, + g->blcg_enabled); + + /* enable pbdma */ + mask = 0; + for (i = 0; i < host_num_pbdma; ++i) + mask |= mc_enable_pb_sel_f(mc_enable_pb_0_enabled_v(), i); + gk20a_writel(g, mc_enable_pb_r(), mask); + + + timeout = gk20a_readl(g, fifo_fb_timeout_r()); + nvgpu_log_info(g, "fifo_fb_timeout reg 
val = 0x%08x", timeout); + if (!nvgpu_platform_is_silicon(g)) { + timeout = set_field(timeout, fifo_fb_timeout_period_m(), + fifo_fb_timeout_period_max_f()); + timeout = set_field(timeout, fifo_fb_timeout_detection_m(), + fifo_fb_timeout_detection_disabled_f()); + nvgpu_log_info(g, "new fifo_fb_timeout reg val = 0x%08x", + timeout); + gk20a_writel(g, fifo_fb_timeout_r(), timeout); + } + + for (i = 0; i < host_num_pbdma; i++) { + timeout = gk20a_readl(g, pbdma_timeout_r(i)); + nvgpu_log_info(g, "pbdma_timeout reg val = 0x%08x", + timeout); + if (!nvgpu_platform_is_silicon(g)) { + timeout = set_field(timeout, pbdma_timeout_period_m(), + pbdma_timeout_period_max_f()); + nvgpu_log_info(g, "new pbdma_timeout reg val = 0x%08x", + timeout); + gk20a_writel(g, pbdma_timeout_r(i), timeout); + } + } + + /* clear and enable pbdma interrupt */ + for (i = 0; i < host_num_pbdma; i++) { + gk20a_writel(g, pbdma_intr_0_r(i), 0xFFFFFFFF); + gk20a_writel(g, pbdma_intr_1_r(i), 0xFFFFFFFF); + + intr_stall = gk20a_readl(g, pbdma_intr_stall_r(i)); + gk20a_dbg_info("pbdma id:%u, intr_en_0 0x%08x", i, intr_stall); + gk20a_writel(g, pbdma_intr_en_0_r(i), intr_stall); + + intr_stall = gk20a_readl(g, pbdma_intr_stall_1_r(i)); + gk20a_dbg_info("pbdma id:%u, intr_en_1 0x%08x", i, intr_stall); + gk20a_writel(g, pbdma_intr_en_1_r(i), intr_stall); + } + + /* clear ctxsw timeout interrupts */ + gk20a_writel(g, fifo_intr_ctxsw_timeout_r(), ~0); + + if (nvgpu_platform_is_silicon(g)) { + /* enable ctxsw timeout */ + timeout = GRFIFO_TIMEOUT_CHECK_PERIOD_US; + timeout = scale_ptimer(timeout, + ptimer_scalingfactor10x(g->ptimer_src_freq)); + timeout |= fifo_eng_ctxsw_timeout_detection_enabled_f(); + gk20a_writel(g, fifo_eng_ctxsw_timeout_r(), timeout); + } else { + timeout = gk20a_readl(g, fifo_eng_ctxsw_timeout_r()); + nvgpu_log_info(g, "fifo_eng_ctxsw_timeout reg val = 0x%08x", + timeout); + timeout = set_field(timeout, fifo_eng_ctxsw_timeout_period_m(), + fifo_eng_ctxsw_timeout_period_max_f()); + 
timeout = set_field(timeout, + fifo_eng_ctxsw_timeout_detection_m(), + fifo_eng_ctxsw_timeout_detection_disabled_f()); + nvgpu_log_info(g, "new fifo_eng_ctxsw_timeout reg val = 0x%08x", + timeout); + gk20a_writel(g, fifo_eng_ctxsw_timeout_r(), timeout); + } + + /* clear runlist interrupts */ + gk20a_writel(g, fifo_intr_runlist_r(), ~0); + + /* clear and enable pfifo interrupt */ + gk20a_writel(g, fifo_intr_0_r(), 0xFFFFFFFF); + mask = gv11b_fifo_intr_0_en_mask(g); + gk20a_dbg_info("fifo_intr_en_0 0x%08x", mask); + gk20a_writel(g, fifo_intr_en_0_r(), mask); + gk20a_dbg_info("fifo_intr_en_1 = 0x80000000"); + gk20a_writel(g, fifo_intr_en_1_r(), 0x80000000); + + gk20a_dbg_fn("done"); + + return 0; +} + +static const char *const gv11b_sched_error_str[] = { + "xxx-0", + "xxx-1", + "xxx-2", + "xxx-3", + "xxx-4", + "engine_reset", + "rl_ack_timeout", + "rl_ack_extra", + "rl_rdat_timeout", + "rl_rdat_extra", + "xxx-a", + "xxx-b", + "rl_req_timeout", + "new_runlist", + "code_config_while_busy", + "xxx-f", + "xxx-0x10", + "xxx-0x11", + "xxx-0x12", + "xxx-0x13", + "xxx-0x14", + "xxx-0x15", + "xxx-0x16", + "xxx-0x17", + "xxx-0x18", + "xxx-0x19", + "xxx-0x1a", + "xxx-0x1b", + "xxx-0x1c", + "xxx-0x1d", + "xxx-0x1e", + "xxx-0x1f", + "bad_tsg", +}; + +bool gv11b_fifo_handle_sched_error(struct gk20a *g) +{ + u32 sched_error; + + sched_error = gk20a_readl(g, fifo_intr_sched_error_r()); + + if (sched_error < ARRAY_SIZE(gv11b_sched_error_str)) + nvgpu_err(g, "fifo sched error :%s", + gv11b_sched_error_str[sched_error]); + else + nvgpu_err(g, "fifo sched error code not supported"); + + if (sched_error == SCHED_ERROR_CODE_BAD_TSG ) { + /* id is unknown, preempt all runlists and do recovery */ + gk20a_fifo_recover(g, 0, 0, false, false, false); + } + + return false; +} + +static u32 gv11b_fifo_ctxsw_timeout_info(struct gk20a *g, u32 active_eng_id) +{ + u32 tsgid = FIFO_INVAL_TSG_ID; + u32 timeout_info; + u32 ctx_status, info_status; + + timeout_info = gk20a_readl(g, + 
			fifo_intr_ctxsw_timeout_info_r(active_eng_id));

	/*
	 * ctxsw_state and tsgid are snapped at the point of the timeout and
	 * will not change while the corresponding INTR_CTXSW_TIMEOUT_ENGINE bit
	 * is PENDING.
	 */
	ctx_status = fifo_intr_ctxsw_timeout_info_ctxsw_state_v(timeout_info);
	if (ctx_status ==
		fifo_intr_ctxsw_timeout_info_ctxsw_state_load_v()) {

		tsgid = fifo_intr_ctxsw_timeout_info_next_tsgid_v(timeout_info);

	} else if (ctx_status ==
		fifo_intr_ctxsw_timeout_info_ctxsw_state_switch_v() ||
		ctx_status ==
		fifo_intr_ctxsw_timeout_info_ctxsw_state_save_v()) {

		tsgid = fifo_intr_ctxsw_timeout_info_prev_tsgid_v(timeout_info);
	}
	gk20a_dbg_info("ctxsw timeout info: tsgid = %d", tsgid);

	/*
	 * STATUS indicates whether the context request ack was eventually
	 * received and whether a subsequent request timed out. This field is
	 * updated live while the corresponding INTR_CTXSW_TIMEOUT_ENGINE bit
	 * is PENDING. STATUS starts in AWAITING_ACK, and progresses to
	 * ACK_RECEIVED and finally ends with DROPPED_TIMEOUT.
	 *
	 * AWAITING_ACK - context request ack still not returned from engine.
	 * ENG_WAS_RESET - The engine was reset via a PRI write to NV_PMC_ENABLE
	 * or NV_PMC_ELPG_ENABLE prior to receiving the ack. Host will not
	 * expect ctx ack to return, but if it is already in flight, STATUS will
	 * transition shortly to ACK_RECEIVED unless the interrupt is cleared
	 * first. Once the engine is reset, additional context switches can
	 * occur; if one times out, STATUS will transition to DROPPED_TIMEOUT
	 * if the interrupt isn't cleared first.
	 * ACK_RECEIVED - The ack for the timed-out context request was
	 * received between the point of the timeout and this register being
	 * read. Note this STATUS can be reported during the load stage of the
	 * same context switch that timed out if the timeout occurred during the
	 * save half of a context switch. Additional context requests may have
	 * completed or may be outstanding, but no further context timeout has
	 * occurred. This simplifies checking for spurious context switch
	 * timeouts.
	 * DROPPED_TIMEOUT - The originally timed-out context request acked,
	 * but a subsequent context request then timed out.
	 * Information about the subsequent timeout is not stored; in fact, that
	 * context request may also have already been acked by the time SW
	 * reads this register. If not, there is a chance SW can get the
	 * dropped information by clearing the corresponding
	 * INTR_CTXSW_TIMEOUT_ENGINE bit and waiting for the timeout to occur
	 * again. Note, however, that if the engine does time out again,
	 * it may not be from the original request that caused the
	 * DROPPED_TIMEOUT state, as that request may
	 * be acked in the interim.
	 */
	info_status = fifo_intr_ctxsw_timeout_info_status_v(timeout_info);
	if (info_status ==
		fifo_intr_ctxsw_timeout_info_status_awaiting_ack_v()) {

		gk20a_dbg_info("ctxsw timeout info : awaiting ack");

	} else if (info_status ==
		fifo_intr_ctxsw_timeout_info_status_eng_was_reset_v()) {

		gk20a_dbg_info("ctxsw timeout info : eng was reset");

	} else if (info_status ==
		fifo_intr_ctxsw_timeout_info_status_ack_received_v()) {

		gk20a_dbg_info("ctxsw timeout info : ack received");
		/* no need to recover */
		tsgid = FIFO_INVAL_TSG_ID;

	} else if (info_status ==
		fifo_intr_ctxsw_timeout_info_status_dropped_timeout_v()) {

		gk20a_dbg_info("ctxsw timeout info : dropped timeout");
		/* no need to recover */
		tsgid = FIFO_INVAL_TSG_ID;

	} else {
		gk20a_dbg_info("ctxsw timeout info status = %u", info_status);
	}

	/* FIFO_INVAL_TSG_ID tells the caller no recovery is required */
	return tsgid;
}

/*
 * Handler for the fifo_intr_0 ctxsw-timeout interrupt.  Scans the active
 * engines for pending ctxsw-timeout state and returns true when a TSG
 * was found to have timed out and recovery was triggered.
 */
bool gv11b_fifo_handle_ctxsw_timeout(struct gk20a *g, u32 fifo_intr)
{
	bool ret = false;
	u32 tsgid = FIFO_INVAL_TSG_ID;
	u32 engine_id, active_eng_id;
	u32 timeout_val, ctxsw_timeout_engines;


	if (!(fifo_intr & fifo_intr_0_ctxsw_timeout_pending_f()))
		return ret;
+ + /* get ctxsw timedout engines */ + ctxsw_timeout_engines = gk20a_readl(g, fifo_intr_ctxsw_timeout_r()); + if (ctxsw_timeout_engines == 0) { + nvgpu_err(g, "no eng ctxsw timeout pending"); + return ret; + } + + timeout_val = gk20a_readl(g, fifo_eng_ctxsw_timeout_r()); + timeout_val = fifo_eng_ctxsw_timeout_period_v(timeout_val); + + gk20a_dbg_info("eng ctxsw timeout period = 0x%x", timeout_val); + + for (engine_id = 0; engine_id < g->fifo.num_engines; engine_id++) { + active_eng_id = g->fifo.active_engines_list[engine_id]; + + if (ctxsw_timeout_engines & + fifo_intr_ctxsw_timeout_engine_pending_f( + active_eng_id)) { + + struct fifo_gk20a *f = &g->fifo; + u32 ms = 0; + bool verbose = false; + + tsgid = gv11b_fifo_ctxsw_timeout_info(g, active_eng_id); + + if (tsgid == FIFO_INVAL_TSG_ID) + continue; + + if (gk20a_fifo_check_tsg_ctxsw_timeout( + &f->tsg[tsgid], &verbose, &ms)) { + ret = true; + nvgpu_err(g, + "ctxsw timeout error:" + "active engine id =%u, %s=%d, ms=%u", + active_eng_id, "tsg", tsgid, ms); + + /* Cancel all channels' timeout */ + gk20a_channel_timeout_restart_all_channels(g); + gk20a_fifo_recover(g, BIT(active_eng_id), tsgid, + true, true, verbose); + } else { + gk20a_dbg_info( + "fifo is waiting for ctx switch: " + "for %d ms, %s=%d", ms, "tsg", tsgid); + } + } + } + /* clear interrupt */ + gk20a_writel(g, fifo_intr_ctxsw_timeout_r(), ctxsw_timeout_engines); + return ret; +} + +unsigned int gv11b_fifo_handle_pbdma_intr_0(struct gk20a *g, + u32 pbdma_id, u32 pbdma_intr_0, + u32 *handled, u32 *error_notifier) +{ + unsigned int rc_type = RC_TYPE_NO_RC; + + rc_type = gk20a_fifo_handle_pbdma_intr_0(g, pbdma_id, + pbdma_intr_0, handled, error_notifier); + + if (pbdma_intr_0 & pbdma_intr_0_clear_faulted_error_pending_f()) { + gk20a_dbg(gpu_dbg_intr, "clear faulted error on pbdma id %d", + pbdma_id); + gk20a_fifo_reset_pbdma_method(g, pbdma_id, 0); + *handled |= pbdma_intr_0_clear_faulted_error_pending_f(); + rc_type = RC_TYPE_PBDMA_FAULT; + } + + if 
(pbdma_intr_0 & pbdma_intr_0_eng_reset_pending_f()) { + gk20a_dbg(gpu_dbg_intr, "eng reset intr on pbdma id %d", + pbdma_id); + *handled |= pbdma_intr_0_eng_reset_pending_f(); + rc_type = RC_TYPE_PBDMA_FAULT; + } + + return rc_type; +} + +/* + * Pbdma which encountered the ctxnotvalid interrupt will stall and + * prevent the channel which was loaded at the time the interrupt fired + * from being swapped out until the interrupt is cleared. + * CTXNOTVALID pbdma interrupt indicates error conditions related + * to the *_CTX_VALID fields for a channel. The following + * conditions trigger the interrupt: + * * CTX_VALID bit for the targeted engine is FALSE + * * At channel start/resume, all preemptible eng have CTX_VALID FALSE but: + * - CTX_RELOAD is set in CCSR_CHANNEL_STATUS, + * - PBDMA_TARGET_SHOULD_SEND_HOST_TSG_EVENT is TRUE, or + * - PBDMA_TARGET_NEEDS_HOST_TSG_EVENT is TRUE + * The field is left NOT_PENDING and the interrupt is not raised if the PBDMA is + * currently halted. This allows SW to unblock the PBDMA and recover. + * SW may read METHOD0, CHANNEL_STATUS and TARGET to determine whether the + * interrupt was due to an engine method, CTX_RELOAD, SHOULD_SEND_HOST_TSG_EVENT + * or NEEDS_HOST_TSG_EVENT. If METHOD0 VALID is TRUE, lazy context creation + * can be used or the TSG may be destroyed. + * If METHOD0 VALID is FALSE, the error is likely a bug in SW, and the TSG + * will have to be destroyed. 
+ */ + +unsigned int gv11b_fifo_handle_pbdma_intr_1(struct gk20a *g, + u32 pbdma_id, u32 pbdma_intr_1, + u32 *handled, u32 *error_notifier) +{ + unsigned int rc_type = RC_TYPE_PBDMA_FAULT; + u32 pbdma_intr_1_current = gk20a_readl(g, pbdma_intr_1_r(pbdma_id)); + + /* minimize race with the gpu clearing the pending interrupt */ + if (!(pbdma_intr_1_current & + pbdma_intr_1_ctxnotvalid_pending_f())) + pbdma_intr_1 &= ~pbdma_intr_1_ctxnotvalid_pending_f(); + + if (pbdma_intr_1 == 0) + return RC_TYPE_NO_RC; + + if (pbdma_intr_1 & pbdma_intr_1_ctxnotvalid_pending_f()) { + gk20a_dbg(gpu_dbg_intr, "ctxnotvalid intr on pbdma id %d", + pbdma_id); + nvgpu_err(g, "pbdma_intr_1(%d)= 0x%08x ", + pbdma_id, pbdma_intr_1); + *handled |= pbdma_intr_1_ctxnotvalid_pending_f(); + } else{ + /* + * rest of the interrupts in _intr_1 are "host copy engine" + * related, which is not supported. For now just make them + * channel fatal. + */ + nvgpu_err(g, "hce err: pbdma_intr_1(%d):0x%08x", + pbdma_id, pbdma_intr_1); + *handled |= pbdma_intr_1; + } + + return rc_type; +} + +static void gv11b_fifo_init_ramfc_eng_method_buffer(struct gk20a *g, + struct channel_gk20a *ch, struct nvgpu_mem *mem) +{ + struct tsg_gk20a *tsg; + struct nvgpu_mem *method_buffer_per_runque; + + tsg = tsg_gk20a_from_ch(ch); + if (tsg == NULL) { + nvgpu_err(g, "channel is not part of tsg"); + return; + } + if (tsg->eng_method_buffers == NULL) { + nvgpu_log_info(g, "eng method buffer NULL"); + return; + } + if (tsg->runlist_id == gk20a_fifo_get_fast_ce_runlist_id(g)) + method_buffer_per_runque = + &tsg->eng_method_buffers[ASYNC_CE_RUNQUE]; + else + method_buffer_per_runque = + &tsg->eng_method_buffers[GR_RUNQUE]; + + nvgpu_mem_wr32(g, mem, ram_in_eng_method_buffer_addr_lo_w(), + u64_lo32(method_buffer_per_runque->gpu_va)); + nvgpu_mem_wr32(g, mem, ram_in_eng_method_buffer_addr_hi_w(), + u64_hi32(method_buffer_per_runque->gpu_va)); + + nvgpu_log_info(g, "init ramfc with method buffer"); +} + +unsigned int 
gv11b_fifo_get_eng_method_buffer_size(struct gk20a *g) +{ + unsigned int buffer_size; + + buffer_size = ((9 + 1 + 3) * g->ops.ce2.get_num_pce(g)) + 2; + buffer_size = (27 * 5 * buffer_size); + buffer_size = roundup(buffer_size, PAGE_SIZE); + nvgpu_log_info(g, "method buffer size in bytes %d", buffer_size); + + return buffer_size; +} + +void gv11b_fifo_init_eng_method_buffers(struct gk20a *g, + struct tsg_gk20a *tsg) +{ + struct vm_gk20a *vm = g->mm.bar2.vm; + int err = 0; + int i; + unsigned int runque, method_buffer_size; + unsigned int num_pbdma = g->fifo.num_pbdma; + + if (tsg->eng_method_buffers != NULL) + return; + + method_buffer_size = gv11b_fifo_get_eng_method_buffer_size(g); + if (method_buffer_size == 0) { + nvgpu_info(g, "ce will hit MTHD_BUFFER_FAULT"); + return; + } + + tsg->eng_method_buffers = nvgpu_kzalloc(g, + num_pbdma * sizeof(struct nvgpu_mem)); + + for (runque = 0; runque < num_pbdma; runque++) { + err = nvgpu_dma_alloc_map_sys(vm, method_buffer_size, + &tsg->eng_method_buffers[runque]); + if (err) + break; + } + if (err) { + for (i = (runque - 1); i >= 0; i--) + nvgpu_dma_unmap_free(vm, + &tsg->eng_method_buffers[i]); + + nvgpu_kfree(g, tsg->eng_method_buffers); + tsg->eng_method_buffers = NULL; + nvgpu_err(g, "could not alloc eng method buffers"); + return; + } + nvgpu_log_info(g, "eng method buffers allocated"); + +} + +void gv11b_fifo_deinit_eng_method_buffers(struct gk20a *g, + struct tsg_gk20a *tsg) +{ + struct vm_gk20a *vm = g->mm.bar2.vm; + unsigned int runque; + + if (tsg->eng_method_buffers == NULL) + return; + + for (runque = 0; runque < g->fifo.num_pbdma; runque++) + nvgpu_dma_unmap_free(vm, &tsg->eng_method_buffers[runque]); + + nvgpu_kfree(g, tsg->eng_method_buffers); + tsg->eng_method_buffers = NULL; + + nvgpu_log_info(g, "eng method buffers de-allocated"); +} + +#ifdef CONFIG_TEGRA_GK20A_NVHOST +int gv11b_fifo_alloc_syncpt_buf(struct channel_gk20a *c, + u32 syncpt_id, struct nvgpu_mem *syncpt_buf) +{ + u32 nr_pages; + int err = 
0; + struct gk20a *g = c->g; + struct vm_gk20a *vm = c->vm; + + /* + * Add ro map for complete sync point shim range in vm + * All channels sharing same vm will share same ro mapping. + * Create rw map for current channel sync point + */ + if (!vm->syncpt_ro_map_gpu_va) { + vm->syncpt_ro_map_gpu_va = nvgpu_gmmu_map(c->vm, + &g->syncpt_mem, g->syncpt_unit_size, + 0, gk20a_mem_flag_read_only, + false, APERTURE_SYSMEM); + + if (!vm->syncpt_ro_map_gpu_va) { + nvgpu_err(g, "failed to ro map syncpt buffer"); + nvgpu_dma_free(g, &g->syncpt_mem); + err = -ENOMEM; + } + } + + nr_pages = DIV_ROUND_UP(g->syncpt_size, PAGE_SIZE); + __nvgpu_mem_create_from_phys(g, syncpt_buf, + (g->syncpt_unit_base + + nvgpu_nvhost_syncpt_unit_interface_get_byte_offset(syncpt_id)), + nr_pages); + syncpt_buf->gpu_va = nvgpu_gmmu_map(c->vm, syncpt_buf, + g->syncpt_size, 0, gk20a_mem_flag_none, + false, APERTURE_SYSMEM); + + if (!syncpt_buf->gpu_va) { + nvgpu_err(g, "failed to map syncpt buffer"); + nvgpu_dma_free(g, syncpt_buf); + err = -ENOMEM; + } + return err; +} + +void gv11b_fifo_free_syncpt_buf(struct channel_gk20a *c, + struct nvgpu_mem *syncpt_buf) +{ + nvgpu_gmmu_unmap(c->vm, syncpt_buf, syncpt_buf->gpu_va); + nvgpu_dma_free(c->g, syncpt_buf); +} + +void gv11b_fifo_add_syncpt_wait_cmd(struct gk20a *g, + struct priv_cmd_entry *cmd, u32 off, + u32 id, u32 thresh, u64 gpu_va_base) +{ + u64 gpu_va = gpu_va_base + + nvgpu_nvhost_syncpt_unit_interface_get_byte_offset(id); + + gk20a_dbg_fn(""); + + off = cmd->off + off; + + /* semaphore_a */ + nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010004); + nvgpu_mem_wr32(g, cmd->mem, off++, + (gpu_va >> 32) & 0xff); + /* semaphore_b */ + nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010005); + /* offset */ + nvgpu_mem_wr32(g, cmd->mem, off++, gpu_va & 0xffffffff); + + /* semaphore_c */ + nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010006); + /* payload */ + nvgpu_mem_wr32(g, cmd->mem, off++, thresh); + /* semaphore_d */ + nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010007); 
+ /* operation: acq_geq, switch_en */ + nvgpu_mem_wr32(g, cmd->mem, off++, 0x4 | (0x1 << 12)); +} + +u32 gv11b_fifo_get_syncpt_wait_cmd_size(void) +{ + return 8; +} + +void gv11b_fifo_add_syncpt_incr_cmd(struct gk20a *g, + bool wfi_cmd, struct priv_cmd_entry *cmd, + u32 id, u64 gpu_va) +{ + u32 off = cmd->off; + + gk20a_dbg_fn(""); + + /* semaphore_a */ + nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010004); + nvgpu_mem_wr32(g, cmd->mem, off++, + (gpu_va >> 32) & 0xff); + /* semaphore_b */ + nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010005); + /* offset */ + nvgpu_mem_wr32(g, cmd->mem, off++, gpu_va & 0xffffffff); + + /* semaphore_c */ + nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010006); + /* payload */ + nvgpu_mem_wr32(g, cmd->mem, off++, 0x0); + /* semaphore_d */ + nvgpu_mem_wr32(g, cmd->mem, off++, 0x20010007); + + /* operation: release, wfi */ + nvgpu_mem_wr32(g, cmd->mem, off++, + 0x2 | ((wfi_cmd ? 0x0 : 0x1) << 20)); + /* ignored */ + nvgpu_mem_wr32(g, cmd->mem, off++, 0); +} + +u32 gv11b_fifo_get_syncpt_incr_cmd_size(bool wfi_cmd) +{ + return 9; +} +#endif /* CONFIG_TEGRA_GK20A_NVHOST */ + +int gv11b_init_fifo_setup_hw(struct gk20a *g) +{ + struct fifo_gk20a *f = &g->fifo; + + f->t19x.max_subctx_count = + gr_pri_fe_chip_def_info_max_veid_count_init_v(); + return 0; +} + +static u32 gv11b_mmu_fault_id_to_gr_veid(struct gk20a *g, u32 gr_eng_fault_id, + u32 mmu_fault_id) +{ + struct fifo_gk20a *f = &g->fifo; + u32 num_subctx; + u32 veid = FIFO_INVAL_VEID; + + num_subctx = f->t19x.max_subctx_count; + + if (mmu_fault_id >= gr_eng_fault_id && + mmu_fault_id < (gr_eng_fault_id + num_subctx)) + veid = mmu_fault_id - gr_eng_fault_id; + + return veid; +} + +static u32 gv11b_mmu_fault_id_to_eng_id_and_veid(struct gk20a *g, + u32 mmu_fault_id, u32 *veid) +{ + u32 engine_id; + u32 active_engine_id; + struct fifo_engine_info_gk20a *engine_info; + struct fifo_gk20a *f = &g->fifo; + + + for (engine_id = 0; engine_id < f->num_engines; engine_id++) { + active_engine_id = 
f->active_engines_list[engine_id]; + engine_info = &g->fifo.engine_info[active_engine_id]; + + if (active_engine_id == ENGINE_GR_GK20A) { + /* get faulted subctx id */ + *veid = gv11b_mmu_fault_id_to_gr_veid(g, + engine_info->fault_id, mmu_fault_id); + if (*veid != FIFO_INVAL_VEID) + break; + } else { + if (engine_info->fault_id == mmu_fault_id) + break; + } + + active_engine_id = FIFO_INVAL_ENGINE_ID; + } + return active_engine_id; +} + +static u32 gv11b_mmu_fault_id_to_pbdma_id(struct gk20a *g, u32 mmu_fault_id) +{ + u32 num_pbdma, reg_val, fault_id_pbdma0; + + reg_val = gk20a_readl(g, fifo_cfg0_r()); + num_pbdma = fifo_cfg0_num_pbdma_v(reg_val); + fault_id_pbdma0 = fifo_cfg0_pbdma_fault_id_v(reg_val); + + if (mmu_fault_id >= fault_id_pbdma0 && + mmu_fault_id <= fault_id_pbdma0 + num_pbdma - 1) + return mmu_fault_id - fault_id_pbdma0; + + return FIFO_INVAL_PBDMA_ID; +} + +void gv11b_mmu_fault_id_to_eng_pbdma_id_and_veid(struct gk20a *g, + u32 mmu_fault_id, u32 *active_engine_id, u32 *veid, u32 *pbdma_id) +{ + *active_engine_id = gv11b_mmu_fault_id_to_eng_id_and_veid(g, + mmu_fault_id, veid); + + if (*active_engine_id == FIFO_INVAL_ENGINE_ID) + *pbdma_id = gv11b_mmu_fault_id_to_pbdma_id(g, mmu_fault_id); + else + *pbdma_id = FIFO_INVAL_PBDMA_ID; +} + +static bool gk20a_fifo_channel_status_is_eng_faulted(struct gk20a *g, u32 chid) +{ + u32 channel = gk20a_readl(g, ccsr_channel_r(chid)); + + return ccsr_channel_eng_faulted_v(channel) == + ccsr_channel_eng_faulted_true_v(); +} + +void gv11b_fifo_tsg_verify_status_faulted(struct channel_gk20a *ch) +{ + struct gk20a *g = ch->g; + struct tsg_gk20a *tsg = &g->fifo.tsg[ch->tsgid]; + + /* + * If channel has FAULTED set, clear the CE method buffer + * if saved out channel is same as faulted channel + */ + if (!gk20a_fifo_channel_status_is_eng_faulted(g, ch->chid)) + return; + + if (tsg->eng_method_buffers == NULL) + return; + + /* + * CE method buffer format : + * DWord0 = method count + * DWord1 = channel id + * + * It is 
sufficient to write 0 to method count to invalidate + */ + if ((u32)ch->chid == + nvgpu_mem_rd32(g, &tsg->eng_method_buffers[ASYNC_CE_RUNQUE], 1)) + nvgpu_mem_wr32(g, &tsg->eng_method_buffers[ASYNC_CE_RUNQUE], 0, 0); +} diff --git a/drivers/gpu/nvgpu/gv11b/fifo_gv11b.h b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.h new file mode 100644 index 000000000..fc1ddf832 --- /dev/null +++ b/drivers/gpu/nvgpu/gv11b/fifo_gv11b.h @@ -0,0 +1,117 @@ +/* + * GV11B Fifo + * + * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef FIFO_GV11B_H +#define FIFO_GV11B_H + +#define FIFO_INVAL_PBDMA_ID ((u32)~0) +#define FIFO_INVAL_VEID ((u32)~0) + +/* engine context-switch request occurred while the engine was in reset */ +#define SCHED_ERROR_CODE_ENGINE_RESET 0x00000005 + +/* +* ERROR_CODE_BAD_TSG indicates that Host encountered a badly formed TSG header +* or a badly formed channel type runlist entry in the runlist. This is typically +* caused by encountering a new TSG entry in the middle of a TSG definition. +* A channel type entry having wrong runqueue selector can also cause this. +* Additionally this error code can indicate when a channel is encountered on +* the runlist which is outside of a TSG. +*/ +#define SCHED_ERROR_CODE_BAD_TSG 0x00000020 + +/* can be removed after runque support is added */ + +#define GR_RUNQUE 0 /* pbdma 0 */ +#define ASYNC_CE_RUNQUE 2 /* pbdma 2 */ + +#define CHANNEL_INFO_VEID0 0 + +struct gpu_ops; + +void gv11b_fifo_reset_pbdma_and_eng_faulted(struct gk20a *g, + struct channel_gk20a *refch, + u32 faulted_pbdma, u32 faulted_engine); +void gv11b_mmu_fault_id_to_eng_pbdma_id_and_veid(struct gk20a *g, + u32 mmu_fault_id, u32 *active_engine_id, u32 *veid, u32 *pbdma_id); + +void gv11b_get_tsg_runlist_entry(struct tsg_gk20a *tsg, u32 *runlist); +void gv11b_get_ch_runlist_entry(struct channel_gk20a *c, u32 *runlist); +int channel_gv11b_setup_ramfc(struct channel_gk20a *c, + u64 gpfifo_base, u32 gpfifo_entries, + unsigned long acquire_timeout, u32 flags); +u32 gv11b_userd_gp_get(struct gk20a *g, struct channel_gk20a *c); +u64 gv11b_userd_pb_get(struct gk20a *g, struct channel_gk20a *c); +void gv11b_userd_gp_put(struct gk20a *g, struct channel_gk20a *c); +void channel_gv11b_unbind(struct channel_gk20a *ch); +u32 gv11b_fifo_get_num_fifos(struct gk20a *g); +bool gv11b_is_fault_engine_subid_gpc(struct gk20a *g, u32 engine_subid); +void gv11b_dump_channel_status_ramfc(struct gk20a *g, + struct gk20a_debug_output *o, + u32 chid, + struct ch_state *ch_state); 
+void gv11b_dump_eng_status(struct gk20a *g, + struct gk20a_debug_output *o); +u32 gv11b_fifo_intr_0_error_mask(struct gk20a *g); +int gv11b_fifo_is_preempt_pending(struct gk20a *g, u32 id, + unsigned int id_type, unsigned int timeout_rc_type); +int gv11b_fifo_preempt_channel(struct gk20a *g, u32 chid); +int gv11b_fifo_preempt_tsg(struct gk20a *g, u32 tsgid); +int gv11b_fifo_enable_tsg(struct tsg_gk20a *tsg); +int gv11b_fifo_preempt_ch_tsg(struct gk20a *g, u32 id, + unsigned int id_type, unsigned int timeout_rc_type); +void gv11b_fifo_teardown_ch_tsg(struct gk20a *g, u32 act_eng_bitmask, + u32 id, unsigned int id_type, unsigned int rc_type, + struct mmu_fault_info *mmfault); +void gv11b_fifo_init_pbdma_intr_descs(struct fifo_gk20a *f); +int gv11b_init_fifo_reset_enable_hw(struct gk20a *g); +bool gv11b_fifo_handle_sched_error(struct gk20a *g); +bool gv11b_fifo_handle_ctxsw_timeout(struct gk20a *g, u32 fifo_intr); +unsigned int gv11b_fifo_handle_pbdma_intr_0(struct gk20a *g, + u32 pbdma_id, u32 pbdma_intr_0, + u32 *handled, u32 *error_notifier); +unsigned int gv11b_fifo_handle_pbdma_intr_1(struct gk20a *g, + u32 pbdma_id, u32 pbdma_intr_1, + u32 *handled, u32 *error_notifier); +void gv11b_fifo_init_eng_method_buffers(struct gk20a *g, + struct tsg_gk20a *tsg); +void gv11b_fifo_deinit_eng_method_buffers(struct gk20a *g, + struct tsg_gk20a *tsg); +int gv11b_fifo_alloc_syncpt_buf(struct channel_gk20a *c, + u32 syncpt_id, struct nvgpu_mem *syncpt_buf); +void gv11b_fifo_free_syncpt_buf(struct channel_gk20a *c, + struct nvgpu_mem *syncpt_buf); +void gv11b_fifo_add_syncpt_wait_cmd(struct gk20a *g, + struct priv_cmd_entry *cmd, u32 off, + u32 id, u32 thresh, u64 gpu_va_base); +u32 gv11b_fifo_get_syncpt_wait_cmd_size(void); +void gv11b_fifo_add_syncpt_incr_cmd(struct gk20a *g, + bool wfi_cmd, struct priv_cmd_entry *cmd, + u32 id, u64 gpu_va_base); +u32 gv11b_fifo_get_syncpt_incr_cmd_size(bool wfi_cmd); +int gv11b_init_fifo_setup_hw(struct gk20a *g); + +void 
gv11b_fifo_tsg_verify_status_faulted(struct channel_gk20a *ch); +u32 gv11b_fifo_get_preempt_timeout(struct gk20a *g); +#endif diff --git a/drivers/gpu/nvgpu/gv11b/gr_ctx_gv11b.c b/drivers/gpu/nvgpu/gv11b/gr_ctx_gv11b.c new file mode 100644 index 000000000..514aadb12 --- /dev/null +++ b/drivers/gpu/nvgpu/gv11b/gr_ctx_gv11b.c @@ -0,0 +1,72 @@ +/* + * + * GV11B Graphics Context + * + * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "gk20a/gk20a.h" + +#include "gr_ctx_gv11b.h" + +int gr_gv11b_get_netlist_name(struct gk20a *g, int index, char *name) +{ + switch (index) { +#ifdef GV11B_NETLIST_IMAGE_FW_NAME + case NETLIST_FINAL: + sprintf(name, GV11B_NETLIST_IMAGE_FW_NAME); + return 0; +#endif +#ifdef GK20A_NETLIST_IMAGE_A + case NETLIST_SLOT_A: + sprintf(name, GK20A_NETLIST_IMAGE_A); + return 0; +#endif +#ifdef GK20A_NETLIST_IMAGE_B + case NETLIST_SLOT_B: + sprintf(name, GK20A_NETLIST_IMAGE_B); + return 0; +#endif +#ifdef GK20A_NETLIST_IMAGE_C + case NETLIST_SLOT_C: + sprintf(name, GK20A_NETLIST_IMAGE_C); + return 0; +#endif +#ifdef GK20A_NETLIST_IMAGE_D + case NETLIST_SLOT_D: + sprintf(name, GK20A_NETLIST_IMAGE_D); + return 0; +#endif + default: + return -1; + } + + return -1; +} + +bool gr_gv11b_is_firmware_defined(void) +{ +#ifdef GV11B_NETLIST_IMAGE_FW_NAME + return true; +#else + return false; +#endif +} diff --git a/drivers/gpu/nvgpu/gv11b/gr_ctx_gv11b.h b/drivers/gpu/nvgpu/gv11b/gr_ctx_gv11b.h new file mode 100644 index 000000000..0a95ab11b --- /dev/null +++ b/drivers/gpu/nvgpu/gv11b/gr_ctx_gv11b.h @@ -0,0 +1,36 @@ +/* + * GV11B Graphics Context + * + * Copyright (c) 2016 - 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef __GR_CTX_GV11B_H__ +#define __GR_CTX_GV11B_H__ + +#include "gk20a/gr_ctx_gk20a.h" + +/* Define netlist for silicon only */ + +#define GV11B_NETLIST_IMAGE_FW_NAME GK20A_NETLIST_IMAGE_D + +int gr_gv11b_get_netlist_name(struct gk20a *g, int index, char *name); +bool gr_gv11b_is_firmware_defined(void); + +#endif /*__GR_CTX_GV11B_H__*/ diff --git a/drivers/gpu/nvgpu/gv11b/gr_gv11b.c b/drivers/gpu/nvgpu/gv11b/gr_gv11b.c new file mode 100644 index 000000000..3d817d7e3 --- /dev/null +++ b/drivers/gpu/nvgpu/gv11b/gr_gv11b.c @@ -0,0 +1,3639 @@ +/* + * GV11b GPU GR + * + * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include + +#include "gk20a/gk20a.h" +#include "gk20a/gr_gk20a.h" +#include "gk20a/dbg_gpu_gk20a.h" +#include "gk20a/regops_gk20a.h" +#include "gk20a/gr_pri_gk20a.h" + +#include "gm20b/gr_gm20b.h" + +#include "gp10b/gr_gp10b.h" + +#include "gv11b/gr_gv11b.h" +#include "gv11b/mm_gv11b.h" +#include "gv11b/subctx_gv11b.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +bool gr_gv11b_is_valid_class(struct gk20a *g, u32 class_num) +{ + bool valid = false; + + switch (class_num) { + case VOLTA_COMPUTE_A: + case VOLTA_A: + case VOLTA_DMA_COPY_A: + valid = true; + break; + + case MAXWELL_COMPUTE_B: + case MAXWELL_B: + case FERMI_TWOD_A: + case KEPLER_DMA_COPY_A: + case MAXWELL_DMA_COPY_A: + case PASCAL_COMPUTE_A: + case PASCAL_A: + case PASCAL_DMA_COPY_A: + valid = true; + break; + + default: + break; + } + gk20a_dbg_info("class=0x%x valid=%d", class_num, valid); + return valid; +} + +bool gr_gv11b_is_valid_gfx_class(struct gk20a *g, u32 class_num) +{ + bool valid = false; + + switch (class_num) { + case VOLTA_A: + case PASCAL_A: + case MAXWELL_B: + valid = true; + break; + + default: + break; + } + return valid; +} + +bool gr_gv11b_is_valid_compute_class(struct gk20a *g, u32 class_num) +{ + bool valid = false; + + switch (class_num) { + case VOLTA_COMPUTE_A: + case PASCAL_COMPUTE_A: + case MAXWELL_COMPUTE_B: + valid = true; + break; + + default: + break; + } + return valid; +} + +static u32 gv11b_gr_sm_offset(struct gk20a *g, u32 sm) +{ + + u32 sm_pri_stride = nvgpu_get_litter_value(g, GPU_LIT_SM_PRI_STRIDE); + u32 sm_offset = 
sm_pri_stride * sm; + + return sm_offset; +} + +static int gr_gv11b_handle_l1_tag_exception(struct gk20a *g, u32 gpc, u32 tpc, + bool *post_event, struct channel_gk20a *fault_ch, + u32 *hww_global_esr) +{ + u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE); + u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_TPC_IN_GPC_STRIDE); + u32 offset = gpc_stride * gpc + tpc_in_gpc_stride * tpc; + u32 l1_tag_ecc_status, l1_tag_ecc_corrected_err_status = 0; + u32 l1_tag_ecc_uncorrected_err_status = 0; + u32 l1_tag_corrected_err_count_delta = 0; + u32 l1_tag_uncorrected_err_count_delta = 0; + bool is_l1_tag_ecc_corrected_total_err_overflow = 0; + bool is_l1_tag_ecc_uncorrected_total_err_overflow = 0; + + /* Check for L1 tag ECC errors. */ + l1_tag_ecc_status = gk20a_readl(g, + gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_r() + offset); + l1_tag_ecc_corrected_err_status = l1_tag_ecc_status & + (gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_corrected_err_el1_0_m() | + gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_corrected_err_el1_1_m() | + gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_corrected_err_pixrpf_m() | + gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_corrected_err_miss_fifo_m()); + l1_tag_ecc_uncorrected_err_status = l1_tag_ecc_status & + (gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_uncorrected_err_el1_0_m() | + gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_uncorrected_err_el1_1_m() | + gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_uncorrected_err_pixrpf_m() | + gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_uncorrected_err_miss_fifo_m()); + + if ((l1_tag_ecc_corrected_err_status == 0) && (l1_tag_ecc_uncorrected_err_status == 0)) + return 0; + + l1_tag_corrected_err_count_delta = + gr_pri_gpc0_tpc0_sm_l1_tag_ecc_corrected_err_count_total_v( + gk20a_readl(g, + gr_pri_gpc0_tpc0_sm_l1_tag_ecc_corrected_err_count_r() + + offset)); + l1_tag_uncorrected_err_count_delta = + gr_pri_gpc0_tpc0_sm_l1_tag_ecc_uncorrected_err_count_total_v( + gk20a_readl(g, + gr_pri_gpc0_tpc0_sm_l1_tag_ecc_uncorrected_err_count_r() + + offset)); + 
is_l1_tag_ecc_corrected_total_err_overflow = + gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_corrected_err_total_counter_overflow_v(l1_tag_ecc_status); + is_l1_tag_ecc_uncorrected_total_err_overflow = + gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_uncorrected_err_total_counter_overflow_v(l1_tag_ecc_status); + + if ((l1_tag_corrected_err_count_delta > 0) || is_l1_tag_ecc_corrected_total_err_overflow) { + gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, + "corrected error (SBE) detected in SM L1 tag! err_mask [%08x] is_overf [%d]", + l1_tag_ecc_corrected_err_status, is_l1_tag_ecc_corrected_total_err_overflow); + + /* HW uses 16-bits counter */ + l1_tag_corrected_err_count_delta += + (is_l1_tag_ecc_corrected_total_err_overflow << + gr_pri_gpc0_tpc0_sm_l1_tag_ecc_corrected_err_count_total_s()); + g->ecc.gr.t19x.sm_l1_tag_corrected_err_count.counters[tpc] += + l1_tag_corrected_err_count_delta; + gk20a_writel(g, + gr_pri_gpc0_tpc0_sm_l1_tag_ecc_corrected_err_count_r() + offset, + 0); + } + if ((l1_tag_uncorrected_err_count_delta > 0) || is_l1_tag_ecc_uncorrected_total_err_overflow) { + gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, + "Uncorrected error (DBE) detected in SM L1 tag! 
err_mask [%08x] is_overf [%d]", + l1_tag_ecc_uncorrected_err_status, is_l1_tag_ecc_uncorrected_total_err_overflow); + + /* HW uses 16-bits counter */ + l1_tag_uncorrected_err_count_delta += + (is_l1_tag_ecc_uncorrected_total_err_overflow << + gr_pri_gpc0_tpc0_sm_l1_tag_ecc_uncorrected_err_count_total_s()); + g->ecc.gr.t19x.sm_l1_tag_uncorrected_err_count.counters[tpc] += + l1_tag_uncorrected_err_count_delta; + gk20a_writel(g, + gr_pri_gpc0_tpc0_sm_l1_tag_ecc_uncorrected_err_count_r() + offset, + 0); + } + + gk20a_writel(g, gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_r() + offset, + gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_reset_task_f()); + + return 0; + +} + +static int gr_gv11b_handle_lrf_exception(struct gk20a *g, u32 gpc, u32 tpc, + bool *post_event, struct channel_gk20a *fault_ch, + u32 *hww_global_esr) +{ + u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE); + u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_TPC_IN_GPC_STRIDE); + u32 offset = gpc_stride * gpc + tpc_in_gpc_stride * tpc; + u32 lrf_ecc_status, lrf_ecc_corrected_err_status = 0; + u32 lrf_ecc_uncorrected_err_status = 0; + u32 lrf_corrected_err_count_delta = 0; + u32 lrf_uncorrected_err_count_delta = 0; + bool is_lrf_ecc_corrected_total_err_overflow = 0; + bool is_lrf_ecc_uncorrected_total_err_overflow = 0; + + /* Check for LRF ECC errors. 
*/ + lrf_ecc_status = gk20a_readl(g, + gr_pri_gpc0_tpc0_sm_lrf_ecc_status_r() + offset); + lrf_ecc_corrected_err_status = lrf_ecc_status & + (gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_qrfdp0_m() | + gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_qrfdp1_m() | + gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_qrfdp2_m() | + gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_qrfdp3_m() | + gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_qrfdp4_m() | + gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_qrfdp5_m() | + gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_qrfdp6_m() | + gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_qrfdp7_m()); + lrf_ecc_uncorrected_err_status = lrf_ecc_status & + (gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_qrfdp0_m() | + gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_qrfdp1_m() | + gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_qrfdp2_m() | + gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_qrfdp3_m() | + gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_qrfdp4_m() | + gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_qrfdp5_m() | + gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_qrfdp6_m() | + gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_qrfdp7_m()); + + if ((lrf_ecc_corrected_err_status == 0) && (lrf_ecc_uncorrected_err_status == 0)) + return 0; + + lrf_corrected_err_count_delta = + gr_pri_gpc0_tpc0_sm_lrf_ecc_corrected_err_count_total_v( + gk20a_readl(g, + gr_pri_gpc0_tpc0_sm_lrf_ecc_corrected_err_count_r() + + offset)); + lrf_uncorrected_err_count_delta = + gr_pri_gpc0_tpc0_sm_lrf_ecc_uncorrected_err_count_total_v( + gk20a_readl(g, + gr_pri_gpc0_tpc0_sm_lrf_ecc_uncorrected_err_count_r() + + offset)); + is_lrf_ecc_corrected_total_err_overflow = + gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_total_counter_overflow_v(lrf_ecc_status); + is_lrf_ecc_uncorrected_total_err_overflow = + gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_total_counter_overflow_v(lrf_ecc_status); + + if 
((lrf_corrected_err_count_delta > 0) || is_lrf_ecc_corrected_total_err_overflow) { + gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, + "corrected error (SBE) detected in SM LRF! err_mask [%08x] is_overf [%d]", + lrf_ecc_corrected_err_status, is_lrf_ecc_corrected_total_err_overflow); + + /* HW uses 16-bits counter */ + lrf_corrected_err_count_delta += + (is_lrf_ecc_corrected_total_err_overflow << + gr_pri_gpc0_tpc0_sm_lrf_ecc_corrected_err_count_total_s()); + g->ecc.gr.t18x.sm_lrf_single_err_count.counters[tpc] += + lrf_corrected_err_count_delta; + gk20a_writel(g, + gr_pri_gpc0_tpc0_sm_lrf_ecc_corrected_err_count_r() + offset, + 0); + } + if ((lrf_uncorrected_err_count_delta > 0) || is_lrf_ecc_uncorrected_total_err_overflow) { + gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, + "Uncorrected error (DBE) detected in SM LRF! err_mask [%08x] is_overf [%d]", + lrf_ecc_uncorrected_err_status, is_lrf_ecc_uncorrected_total_err_overflow); + + /* HW uses 16-bits counter */ + lrf_uncorrected_err_count_delta += + (is_lrf_ecc_uncorrected_total_err_overflow << + gr_pri_gpc0_tpc0_sm_lrf_ecc_uncorrected_err_count_total_s()); + g->ecc.gr.t18x.sm_lrf_double_err_count.counters[tpc] += + lrf_uncorrected_err_count_delta; + gk20a_writel(g, + gr_pri_gpc0_tpc0_sm_lrf_ecc_uncorrected_err_count_r() + offset, + 0); + } + + gk20a_writel(g, gr_pri_gpc0_tpc0_sm_lrf_ecc_status_r() + offset, + gr_pri_gpc0_tpc0_sm_lrf_ecc_status_reset_task_f()); + + return 0; + +} + +void gr_gv11b_enable_hww_exceptions(struct gk20a *g) +{ + /* enable exceptions */ + gk20a_writel(g, gr_fe_hww_esr_r(), + gr_fe_hww_esr_en_enable_f() | + gr_fe_hww_esr_reset_active_f()); + gk20a_writel(g, gr_memfmt_hww_esr_r(), + gr_memfmt_hww_esr_en_enable_f() | + gr_memfmt_hww_esr_reset_active_f()); +} + +void gr_gv11b_enable_exceptions(struct gk20a *g) +{ + struct gr_gk20a *gr = &g->gr; + u32 reg_val; + + /* + * clear exceptions : + * other than SM : hww_esr are reset in *enable_hww_excetpions* + * SM : cleared in *set_hww_esr_report_mask* + */ + + /* 
enable exceptions */ + gk20a_writel(g, gr_exception2_en_r(), 0x0); /* BE not enabled */ + gk20a_writel(g, gr_exception1_en_r(), (1 << gr->gpc_count) - 1); + + reg_val = gr_exception_en_fe_enabled_f() | + gr_exception_en_memfmt_enabled_f() | + gr_exception_en_ds_enabled_f() | + gr_exception_en_gpc_enabled_f(); + gk20a_writel(g, gr_exception_en_r(), reg_val); + +} + +static int gr_gv11b_handle_cbu_exception(struct gk20a *g, u32 gpc, u32 tpc, + bool *post_event, struct channel_gk20a *fault_ch, + u32 *hww_global_esr) +{ + u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE); + u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_TPC_IN_GPC_STRIDE); + u32 offset = gpc_stride * gpc + tpc_in_gpc_stride * tpc; + u32 cbu_ecc_status, cbu_ecc_corrected_err_status = 0; + u32 cbu_ecc_uncorrected_err_status = 0; + u32 cbu_corrected_err_count_delta = 0; + u32 cbu_uncorrected_err_count_delta = 0; + bool is_cbu_ecc_corrected_total_err_overflow = 0; + bool is_cbu_ecc_uncorrected_total_err_overflow = 0; + + /* Check for CBU ECC errors. 
*/ + cbu_ecc_status = gk20a_readl(g, + gr_pri_gpc0_tpc0_sm_cbu_ecc_status_r() + offset); + cbu_ecc_corrected_err_status = cbu_ecc_status & + (gr_pri_gpc0_tpc0_sm_cbu_ecc_status_corrected_err_warp_sm0_m() | + gr_pri_gpc0_tpc0_sm_cbu_ecc_status_corrected_err_warp_sm1_m() | + gr_pri_gpc0_tpc0_sm_cbu_ecc_status_corrected_err_barrier_sm0_m() | + gr_pri_gpc0_tpc0_sm_cbu_ecc_status_corrected_err_barrier_sm1_m()); + cbu_ecc_uncorrected_err_status = cbu_ecc_status & + (gr_pri_gpc0_tpc0_sm_cbu_ecc_status_uncorrected_err_warp_sm0_m() | + gr_pri_gpc0_tpc0_sm_cbu_ecc_status_uncorrected_err_warp_sm1_m() | + gr_pri_gpc0_tpc0_sm_cbu_ecc_status_uncorrected_err_barrier_sm0_m() | + gr_pri_gpc0_tpc0_sm_cbu_ecc_status_uncorrected_err_barrier_sm1_m()); + + if ((cbu_ecc_corrected_err_status == 0) && (cbu_ecc_uncorrected_err_status == 0)) + return 0; + + cbu_corrected_err_count_delta = + gr_pri_gpc0_tpc0_sm_cbu_ecc_corrected_err_count_total_v( + gk20a_readl(g, + gr_pri_gpc0_tpc0_sm_cbu_ecc_corrected_err_count_r() + + offset)); + cbu_uncorrected_err_count_delta = + gr_pri_gpc0_tpc0_sm_cbu_ecc_uncorrected_err_count_total_v( + gk20a_readl(g, + gr_pri_gpc0_tpc0_sm_cbu_ecc_uncorrected_err_count_r() + + offset)); + is_cbu_ecc_corrected_total_err_overflow = + gr_pri_gpc0_tpc0_sm_cbu_ecc_status_corrected_err_total_counter_overflow_v(cbu_ecc_status); + is_cbu_ecc_uncorrected_total_err_overflow = + gr_pri_gpc0_tpc0_sm_cbu_ecc_status_uncorrected_err_total_counter_overflow_v(cbu_ecc_status); + + if ((cbu_corrected_err_count_delta > 0) || is_cbu_ecc_corrected_total_err_overflow) { + gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, + "corrected error (SBE) detected in SM CBU! 
err_mask [%08x] is_overf [%d]", + cbu_ecc_corrected_err_status, is_cbu_ecc_corrected_total_err_overflow); + + /* HW uses 16-bits counter */ + cbu_corrected_err_count_delta += + (is_cbu_ecc_corrected_total_err_overflow << + gr_pri_gpc0_tpc0_sm_cbu_ecc_corrected_err_count_total_s()); + g->ecc.gr.t19x.sm_cbu_corrected_err_count.counters[tpc] += + cbu_corrected_err_count_delta; + gk20a_writel(g, + gr_pri_gpc0_tpc0_sm_cbu_ecc_corrected_err_count_r() + offset, + 0); + } + if ((cbu_uncorrected_err_count_delta > 0) || is_cbu_ecc_uncorrected_total_err_overflow) { + gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, + "Uncorrected error (DBE) detected in SM CBU! err_mask [%08x] is_overf [%d]", + cbu_ecc_uncorrected_err_status, is_cbu_ecc_uncorrected_total_err_overflow); + + /* HW uses 16-bits counter */ + cbu_uncorrected_err_count_delta += + (is_cbu_ecc_uncorrected_total_err_overflow << + gr_pri_gpc0_tpc0_sm_cbu_ecc_uncorrected_err_count_total_s()); + g->ecc.gr.t19x.sm_cbu_uncorrected_err_count.counters[tpc] += + cbu_uncorrected_err_count_delta; + gk20a_writel(g, + gr_pri_gpc0_tpc0_sm_cbu_ecc_uncorrected_err_count_r() + offset, + 0); + } + + gk20a_writel(g, gr_pri_gpc0_tpc0_sm_cbu_ecc_status_r() + offset, + gr_pri_gpc0_tpc0_sm_cbu_ecc_status_reset_task_f()); + + return 0; + +} + +static int gr_gv11b_handle_l1_data_exception(struct gk20a *g, u32 gpc, u32 tpc, + bool *post_event, struct channel_gk20a *fault_ch, + u32 *hww_global_esr) +{ + u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE); + u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_TPC_IN_GPC_STRIDE); + u32 offset = gpc_stride * gpc + tpc_in_gpc_stride * tpc; + u32 l1_data_ecc_status, l1_data_ecc_corrected_err_status = 0; + u32 l1_data_ecc_uncorrected_err_status = 0; + u32 l1_data_corrected_err_count_delta = 0; + u32 l1_data_uncorrected_err_count_delta = 0; + bool is_l1_data_ecc_corrected_total_err_overflow = 0; + bool is_l1_data_ecc_uncorrected_total_err_overflow = 0; + + /* Check for L1 data ECC errors. 
*/ + l1_data_ecc_status = gk20a_readl(g, + gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_r() + offset); + l1_data_ecc_corrected_err_status = l1_data_ecc_status & + (gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_corrected_err_el1_0_m() | + gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_corrected_err_el1_1_m()); + l1_data_ecc_uncorrected_err_status = l1_data_ecc_status & + (gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_uncorrected_err_el1_0_m() | + gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_uncorrected_err_el1_1_m()); + + if ((l1_data_ecc_corrected_err_status == 0) && (l1_data_ecc_uncorrected_err_status == 0)) + return 0; + + l1_data_corrected_err_count_delta = + gr_pri_gpc0_tpc0_sm_l1_data_ecc_corrected_err_count_total_v( + gk20a_readl(g, + gr_pri_gpc0_tpc0_sm_l1_data_ecc_corrected_err_count_r() + + offset)); + l1_data_uncorrected_err_count_delta = + gr_pri_gpc0_tpc0_sm_l1_data_ecc_uncorrected_err_count_total_v( + gk20a_readl(g, + gr_pri_gpc0_tpc0_sm_l1_data_ecc_uncorrected_err_count_r() + + offset)); + is_l1_data_ecc_corrected_total_err_overflow = + gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_corrected_err_total_counter_overflow_v(l1_data_ecc_status); + is_l1_data_ecc_uncorrected_total_err_overflow = + gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_uncorrected_err_total_counter_overflow_v(l1_data_ecc_status); + + if ((l1_data_corrected_err_count_delta > 0) || is_l1_data_ecc_corrected_total_err_overflow) { + gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, + "corrected error (SBE) detected in SM L1 data! 
err_mask [%08x] is_overf [%d]", + l1_data_ecc_corrected_err_status, is_l1_data_ecc_corrected_total_err_overflow); + + /* HW uses 16-bits counter */ + l1_data_corrected_err_count_delta += + (is_l1_data_ecc_corrected_total_err_overflow << + gr_pri_gpc0_tpc0_sm_l1_data_ecc_corrected_err_count_total_s()); + g->ecc.gr.t19x.sm_l1_data_corrected_err_count.counters[tpc] += + l1_data_corrected_err_count_delta; + gk20a_writel(g, + gr_pri_gpc0_tpc0_sm_l1_data_ecc_corrected_err_count_r() + offset, + 0); + } + if ((l1_data_uncorrected_err_count_delta > 0) || is_l1_data_ecc_uncorrected_total_err_overflow) { + gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, + "Uncorrected error (DBE) detected in SM L1 data! err_mask [%08x] is_overf [%d]", + l1_data_ecc_uncorrected_err_status, is_l1_data_ecc_uncorrected_total_err_overflow); + + /* HW uses 16-bits counter */ + l1_data_uncorrected_err_count_delta += + (is_l1_data_ecc_uncorrected_total_err_overflow << + gr_pri_gpc0_tpc0_sm_l1_data_ecc_uncorrected_err_count_total_s()); + g->ecc.gr.t19x.sm_l1_data_uncorrected_err_count.counters[tpc] += + l1_data_uncorrected_err_count_delta; + gk20a_writel(g, + gr_pri_gpc0_tpc0_sm_l1_data_ecc_uncorrected_err_count_r() + offset, + 0); + } + + gk20a_writel(g, gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_r() + offset, + gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_reset_task_f()); + + return 0; + +} + +static int gr_gv11b_handle_icache_exception(struct gk20a *g, u32 gpc, u32 tpc, + bool *post_event, struct channel_gk20a *fault_ch, + u32 *hww_global_esr) +{ + u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE); + u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_TPC_IN_GPC_STRIDE); + u32 offset = gpc_stride * gpc + tpc_in_gpc_stride * tpc; + u32 icache_ecc_status, icache_ecc_corrected_err_status = 0; + u32 icache_ecc_uncorrected_err_status = 0; + u32 icache_corrected_err_count_delta = 0; + u32 icache_uncorrected_err_count_delta = 0; + bool is_icache_ecc_corrected_total_err_overflow = 0; + bool 
is_icache_ecc_uncorrected_total_err_overflow = 0; + + /* Check for L0 && L1 icache ECC errors. */ + icache_ecc_status = gk20a_readl(g, + gr_pri_gpc0_tpc0_sm_icache_ecc_status_r() + offset); + icache_ecc_corrected_err_status = icache_ecc_status & + (gr_pri_gpc0_tpc0_sm_icache_ecc_status_corrected_err_l0_data_m() | + gr_pri_gpc0_tpc0_sm_icache_ecc_status_corrected_err_l0_predecode_m() | + gr_pri_gpc0_tpc0_sm_icache_ecc_status_corrected_err_l1_data_m() | + gr_pri_gpc0_tpc0_sm_icache_ecc_status_corrected_err_l1_predecode_m()); + icache_ecc_uncorrected_err_status = icache_ecc_status & + (gr_pri_gpc0_tpc0_sm_icache_ecc_status_uncorrected_err_l0_data_m() | + gr_pri_gpc0_tpc0_sm_icache_ecc_status_uncorrected_err_l0_predecode_m() | + gr_pri_gpc0_tpc0_sm_icache_ecc_status_uncorrected_err_l1_data_m() | + gr_pri_gpc0_tpc0_sm_icache_ecc_status_uncorrected_err_l1_predecode_m()); + + if ((icache_ecc_corrected_err_status == 0) && (icache_ecc_uncorrected_err_status == 0)) + return 0; + + icache_corrected_err_count_delta = + gr_pri_gpc0_tpc0_sm_icache_ecc_corrected_err_count_total_v( + gk20a_readl(g, + gr_pri_gpc0_tpc0_sm_icache_ecc_corrected_err_count_r() + + offset)); + icache_uncorrected_err_count_delta = + gr_pri_gpc0_tpc0_sm_icache_ecc_uncorrected_err_count_total_v( + gk20a_readl(g, + gr_pri_gpc0_tpc0_sm_icache_ecc_uncorrected_err_count_r() + + offset)); + is_icache_ecc_corrected_total_err_overflow = + gr_pri_gpc0_tpc0_sm_icache_ecc_status_corrected_err_total_counter_overflow_v(icache_ecc_status); + is_icache_ecc_uncorrected_total_err_overflow = + gr_pri_gpc0_tpc0_sm_icache_ecc_status_uncorrected_err_total_counter_overflow_v(icache_ecc_status); + + if ((icache_corrected_err_count_delta > 0) || is_icache_ecc_corrected_total_err_overflow) { + gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, + "corrected error (SBE) detected in SM L0 && L1 icache! 
err_mask [%08x] is_overf [%d]", + icache_ecc_corrected_err_status, is_icache_ecc_corrected_total_err_overflow); + + /* HW uses 16-bits counter */ + icache_corrected_err_count_delta += + (is_icache_ecc_corrected_total_err_overflow << + gr_pri_gpc0_tpc0_sm_icache_ecc_corrected_err_count_total_s()); + g->ecc.gr.t19x.sm_icache_corrected_err_count.counters[tpc] += + icache_corrected_err_count_delta; + gk20a_writel(g, + gr_pri_gpc0_tpc0_sm_icache_ecc_corrected_err_count_r() + offset, + 0); + } + if ((icache_uncorrected_err_count_delta > 0) || is_icache_ecc_uncorrected_total_err_overflow) { + gk20a_dbg(gpu_dbg_fn | gpu_dbg_intr, + "Uncorrected error (DBE) detected in SM L0 && L1 icache! err_mask [%08x] is_overf [%d]", + icache_ecc_uncorrected_err_status, is_icache_ecc_uncorrected_total_err_overflow); + + /* HW uses 16-bits counter */ + icache_uncorrected_err_count_delta += + (is_icache_ecc_uncorrected_total_err_overflow << + gr_pri_gpc0_tpc0_sm_icache_ecc_uncorrected_err_count_total_s()); + g->ecc.gr.t19x.sm_icache_uncorrected_err_count.counters[tpc] += + icache_uncorrected_err_count_delta; + gk20a_writel(g, + gr_pri_gpc0_tpc0_sm_icache_ecc_uncorrected_err_count_r() + offset, + 0); + } + + gk20a_writel(g, gr_pri_gpc0_tpc0_sm_icache_ecc_status_r() + offset, + gr_pri_gpc0_tpc0_sm_icache_ecc_status_reset_task_f()); + + return 0; + +} + +int gr_gv11b_handle_tpc_sm_ecc_exception(struct gk20a *g, + u32 gpc, u32 tpc, + bool *post_event, struct channel_gk20a *fault_ch, + u32 *hww_global_esr) +{ + int ret = 0; + + /* Check for L1 tag ECC errors. */ + gr_gv11b_handle_l1_tag_exception(g, gpc, tpc, post_event, fault_ch, hww_global_esr); + + /* Check for LRF ECC errors. */ + gr_gv11b_handle_lrf_exception(g, gpc, tpc, post_event, fault_ch, hww_global_esr); + + /* Check for CBU ECC errors. */ + gr_gv11b_handle_cbu_exception(g, gpc, tpc, post_event, fault_ch, hww_global_esr); + + /* Check for L1 data ECC errors. 
*/ + gr_gv11b_handle_l1_data_exception(g, gpc, tpc, post_event, fault_ch, hww_global_esr); + + /* Check for L0 && L1 icache ECC errors. */ + gr_gv11b_handle_icache_exception(g, gpc, tpc, post_event, fault_ch, hww_global_esr); + + return ret; +} + +int gr_gv11b_handle_gcc_exception(struct gk20a *g, u32 gpc, u32 tpc, + bool *post_event, struct channel_gk20a *fault_ch, + u32 *hww_global_esr) +{ + u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE); + u32 offset = gpc_stride * gpc; + u32 gcc_l15_ecc_status, gcc_l15_ecc_corrected_err_status = 0; + u32 gcc_l15_ecc_uncorrected_err_status = 0; + u32 gcc_l15_corrected_err_count_delta = 0; + u32 gcc_l15_uncorrected_err_count_delta = 0; + bool is_gcc_l15_ecc_corrected_total_err_overflow = 0; + bool is_gcc_l15_ecc_uncorrected_total_err_overflow = 0; + + /* Check for gcc l15 ECC errors. */ + gcc_l15_ecc_status = gk20a_readl(g, + gr_pri_gpc0_gcc_l15_ecc_status_r() + offset); + gcc_l15_ecc_corrected_err_status = gcc_l15_ecc_status & + (gr_pri_gpc0_gcc_l15_ecc_status_corrected_err_bank0_m() | + gr_pri_gpc0_gcc_l15_ecc_status_corrected_err_bank1_m()); + gcc_l15_ecc_uncorrected_err_status = gcc_l15_ecc_status & + (gr_pri_gpc0_gcc_l15_ecc_status_uncorrected_err_bank0_m() | + gr_pri_gpc0_gcc_l15_ecc_status_uncorrected_err_bank1_m()); + + if ((gcc_l15_ecc_corrected_err_status == 0) && (gcc_l15_ecc_uncorrected_err_status == 0)) + return 0; + + gcc_l15_corrected_err_count_delta = + gr_pri_gpc0_gcc_l15_ecc_corrected_err_count_total_v( + gk20a_readl(g, + gr_pri_gpc0_gcc_l15_ecc_corrected_err_count_r() + + offset)); + gcc_l15_uncorrected_err_count_delta = + gr_pri_gpc0_gcc_l15_ecc_uncorrected_err_count_total_v( + gk20a_readl(g, + gr_pri_gpc0_gcc_l15_ecc_uncorrected_err_count_r() + + offset)); + is_gcc_l15_ecc_corrected_total_err_overflow = + gr_pri_gpc0_gcc_l15_ecc_status_corrected_err_total_counter_overflow_v(gcc_l15_ecc_status); + is_gcc_l15_ecc_uncorrected_total_err_overflow = + 
gr_pri_gpc0_gcc_l15_ecc_status_uncorrected_err_total_counter_overflow_v(gcc_l15_ecc_status); + + if ((gcc_l15_corrected_err_count_delta > 0) || is_gcc_l15_ecc_corrected_total_err_overflow) { + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr, + "corrected error (SBE) detected in GCC L1.5! err_mask [%08x] is_overf [%d]", + gcc_l15_ecc_corrected_err_status, is_gcc_l15_ecc_corrected_total_err_overflow); + + /* HW uses 16-bits counter */ + gcc_l15_corrected_err_count_delta += + (is_gcc_l15_ecc_corrected_total_err_overflow << + gr_pri_gpc0_gcc_l15_ecc_corrected_err_count_total_s()); + g->ecc.gr.t19x.gcc_l15_corrected_err_count.counters[gpc] += + gcc_l15_corrected_err_count_delta; + gk20a_writel(g, + gr_pri_gpc0_gcc_l15_ecc_corrected_err_count_r() + offset, + 0); + } + if ((gcc_l15_uncorrected_err_count_delta > 0) || is_gcc_l15_ecc_uncorrected_total_err_overflow) { + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_intr, + "Uncorrected error (DBE) detected in GCC L1.5! err_mask [%08x] is_overf [%d]", + gcc_l15_ecc_uncorrected_err_status, is_gcc_l15_ecc_uncorrected_total_err_overflow); + + /* HW uses 16-bits counter */ + gcc_l15_uncorrected_err_count_delta += + (is_gcc_l15_ecc_uncorrected_total_err_overflow << + gr_pri_gpc0_gcc_l15_ecc_uncorrected_err_count_total_s()); + g->ecc.gr.t19x.gcc_l15_uncorrected_err_count.counters[gpc] += + gcc_l15_uncorrected_err_count_delta; + gk20a_writel(g, + gr_pri_gpc0_gcc_l15_ecc_uncorrected_err_count_r() + offset, + 0); + } + + gk20a_writel(g, gr_pri_gpc0_gcc_l15_ecc_status_r() + offset, + gr_pri_gpc0_gcc_l15_ecc_status_reset_task_f()); + + return 0; +} + +static int gr_gv11b_handle_gpcmmu_ecc_exception(struct gk20a *g, u32 gpc, + u32 exception) +{ + int ret = 0; + u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE); + u32 offset = gpc_stride * gpc; + u32 ecc_status, ecc_addr, corrected_cnt, uncorrected_cnt; + u32 corrected_delta, uncorrected_delta; + u32 corrected_overflow, uncorrected_overflow; + int hww_esr; + + hww_esr = gk20a_readl(g, 
gr_gpc0_mmu_gpcmmu_global_esr_r() + offset); + + if (!(hww_esr & (gr_gpc0_mmu_gpcmmu_global_esr_ecc_corrected_m() | + gr_gpc0_mmu_gpcmmu_global_esr_ecc_uncorrected_m()))) + return ret; + + ecc_status = gk20a_readl(g, + gr_gpc0_mmu_l1tlb_ecc_status_r() + offset); + ecc_addr = gk20a_readl(g, + gr_gpc0_mmu_l1tlb_ecc_address_r() + offset); + corrected_cnt = gk20a_readl(g, + gr_gpc0_mmu_l1tlb_ecc_corrected_err_count_r() + offset); + uncorrected_cnt = gk20a_readl(g, + gr_gpc0_mmu_l1tlb_ecc_uncorrected_err_count_r() + offset); + + corrected_delta = gr_gpc0_mmu_l1tlb_ecc_corrected_err_count_total_v( + corrected_cnt); + uncorrected_delta = gr_gpc0_mmu_l1tlb_ecc_uncorrected_err_count_total_v( + uncorrected_cnt); + corrected_overflow = ecc_status & + gr_gpc0_mmu_l1tlb_ecc_status_corrected_err_total_counter_overflow_m(); + + uncorrected_overflow = ecc_status & + gr_gpc0_mmu_l1tlb_ecc_status_uncorrected_err_total_counter_overflow_m(); + + + /* clear the interrupt */ + if ((corrected_delta > 0) || corrected_overflow) + gk20a_writel(g, + gr_gpc0_mmu_l1tlb_ecc_corrected_err_count_r() + + offset, 0); + if ((uncorrected_delta > 0) || uncorrected_overflow) + gk20a_writel(g, + gr_gpc0_mmu_l1tlb_ecc_uncorrected_err_count_r() + + offset, 0); + + gk20a_writel(g, gr_gpc0_mmu_l1tlb_ecc_status_r() + offset, + gr_gpc0_mmu_l1tlb_ecc_status_reset_task_f()); + + /* Handle overflow */ + if (corrected_overflow) + corrected_delta += (0x1UL << gr_gpc0_mmu_l1tlb_ecc_corrected_err_count_total_s()); + if (uncorrected_overflow) + uncorrected_delta += (0x1UL << gr_gpc0_mmu_l1tlb_ecc_uncorrected_err_count_total_s()); + + + g->ecc.gr.t19x.mmu_l1tlb_corrected_err_count.counters[gpc] += + corrected_delta; + g->ecc.gr.t19x.mmu_l1tlb_uncorrected_err_count.counters[gpc] += + uncorrected_delta; + nvgpu_log(g, gpu_dbg_intr, + "mmu l1tlb gpc:%d ecc interrupt intr: 0x%x", gpc, hww_esr); + + if (ecc_status & gr_gpc0_mmu_l1tlb_ecc_status_corrected_err_l1tlb_sa_data_m()) + nvgpu_log(g, gpu_dbg_intr, "corrected ecc sa 
data error"); + if (ecc_status & gr_gpc0_mmu_l1tlb_ecc_status_uncorrected_err_l1tlb_sa_data_m()) + nvgpu_log(g, gpu_dbg_intr, "uncorrected ecc sa data error"); + if (ecc_status & gr_gpc0_mmu_l1tlb_ecc_status_corrected_err_l1tlb_fa_data_m()) + nvgpu_log(g, gpu_dbg_intr, "corrected ecc fa data error"); + if (ecc_status & gr_gpc0_mmu_l1tlb_ecc_status_uncorrected_err_l1tlb_fa_data_m()) + nvgpu_log(g, gpu_dbg_intr, "uncorrected ecc fa data error"); + if (corrected_overflow || uncorrected_overflow) + nvgpu_info(g, "mmu l1tlb ecc counter overflow!"); + + nvgpu_log(g, gpu_dbg_intr, + "ecc error address: 0x%x", ecc_addr); + nvgpu_log(g, gpu_dbg_intr, + "ecc error count corrected: %d, uncorrected %d", + g->ecc.gr.t19x.mmu_l1tlb_corrected_err_count.counters[gpc], + g->ecc.gr.t19x.mmu_l1tlb_uncorrected_err_count.counters[gpc]); + + return ret; +} + +static int gr_gv11b_handle_gpccs_ecc_exception(struct gk20a *g, u32 gpc, + u32 exception) +{ + int ret = 0; + u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE); + u32 offset = gpc_stride * gpc; + u32 ecc_status, ecc_addr, corrected_cnt, uncorrected_cnt; + u32 corrected_delta, uncorrected_delta; + u32 corrected_overflow, uncorrected_overflow; + int hww_esr; + + hww_esr = gk20a_readl(g, gr_gpc0_gpccs_hww_esr_r() + offset); + + if (!(hww_esr & (gr_gpc0_gpccs_hww_esr_ecc_uncorrected_m() | + gr_gpc0_gpccs_hww_esr_ecc_corrected_m()))) + return ret; + + ecc_status = gk20a_readl(g, + gr_gpc0_gpccs_falcon_ecc_status_r() + offset); + ecc_addr = gk20a_readl(g, + gr_gpc0_gpccs_falcon_ecc_address_r() + offset); + corrected_cnt = gk20a_readl(g, + gr_gpc0_gpccs_falcon_ecc_corrected_err_count_r() + offset); + uncorrected_cnt = gk20a_readl(g, + gr_gpc0_gpccs_falcon_ecc_uncorrected_err_count_r() + offset); + + corrected_delta = gr_gpc0_gpccs_falcon_ecc_corrected_err_count_total_v( + corrected_cnt); + uncorrected_delta = gr_gpc0_gpccs_falcon_ecc_uncorrected_err_count_total_v( + uncorrected_cnt); + corrected_overflow = ecc_status & + 
gr_gpc0_gpccs_falcon_ecc_status_corrected_err_total_counter_overflow_m(); + + uncorrected_overflow = ecc_status & + gr_gpc0_gpccs_falcon_ecc_status_uncorrected_err_total_counter_overflow_m(); + + + /* clear the interrupt */ + if ((corrected_delta > 0) || corrected_overflow) + gk20a_writel(g, + gr_gpc0_gpccs_falcon_ecc_corrected_err_count_r() + + offset, 0); + if ((uncorrected_delta > 0) || uncorrected_overflow) + gk20a_writel(g, + gr_gpc0_gpccs_falcon_ecc_uncorrected_err_count_r() + + offset, 0); + + gk20a_writel(g, gr_gpc0_gpccs_falcon_ecc_status_r() + offset, + gr_gpc0_gpccs_falcon_ecc_status_reset_task_f()); + + g->ecc.gr.t19x.gpccs_corrected_err_count.counters[gpc] += + corrected_delta; + g->ecc.gr.t19x.gpccs_uncorrected_err_count.counters[gpc] += + uncorrected_delta; + nvgpu_log(g, gpu_dbg_intr, + "gppcs gpc:%d ecc interrupt intr: 0x%x", gpc, hww_esr); + + if (ecc_status & gr_gpc0_gpccs_falcon_ecc_status_corrected_err_imem_m()) + nvgpu_log(g, gpu_dbg_intr, "imem ecc error corrected"); + if (ecc_status & + gr_gpc0_gpccs_falcon_ecc_status_uncorrected_err_imem_m()) + nvgpu_log(g, gpu_dbg_intr, "imem ecc error uncorrected"); + if (ecc_status & + gr_gpc0_gpccs_falcon_ecc_status_corrected_err_dmem_m()) + nvgpu_log(g, gpu_dbg_intr, "dmem ecc error corrected"); + if (ecc_status & + gr_gpc0_gpccs_falcon_ecc_status_uncorrected_err_dmem_m()) + nvgpu_log(g, gpu_dbg_intr, "dmem ecc error uncorrected"); + if (corrected_overflow || uncorrected_overflow) + nvgpu_info(g, "gpccs ecc counter overflow!"); + + nvgpu_log(g, gpu_dbg_intr, + "ecc error row address: 0x%x", + gr_gpc0_gpccs_falcon_ecc_address_row_address_v(ecc_addr)); + + nvgpu_log(g, gpu_dbg_intr, + "ecc error count corrected: %d, uncorrected %d", + g->ecc.gr.t19x.gpccs_corrected_err_count.counters[gpc], + g->ecc.gr.t19x.gpccs_uncorrected_err_count.counters[gpc]); + + return ret; +} + +int gr_gv11b_handle_gpc_gpcmmu_exception(struct gk20a *g, u32 gpc, + u32 gpc_exception) +{ + if (gpc_exception & 
gr_gpc0_gpccs_gpc_exception_gpcmmu_m()) + return gr_gv11b_handle_gpcmmu_ecc_exception(g, gpc, + gpc_exception); + return 0; +} + +int gr_gv11b_handle_gpc_gpccs_exception(struct gk20a *g, u32 gpc, + u32 gpc_exception) +{ + if (gpc_exception & gr_gpc0_gpccs_gpc_exception_gpccs_m()) + return gr_gv11b_handle_gpccs_ecc_exception(g, gpc, + gpc_exception); + + return 0; +} + +void gr_gv11b_enable_gpc_exceptions(struct gk20a *g) +{ + struct gr_gk20a *gr = &g->gr; + u32 tpc_mask; + + gk20a_writel(g, gr_gpcs_tpcs_tpccs_tpc_exception_en_r(), + gr_gpcs_tpcs_tpccs_tpc_exception_en_sm_enabled_f() | + gr_gpcs_tpcs_tpccs_tpc_exception_en_mpc_enabled_f()); + + tpc_mask = + gr_gpcs_gpccs_gpc_exception_en_tpc_f((1 << gr->tpc_count) - 1); + + gk20a_writel(g, gr_gpcs_gpccs_gpc_exception_en_r(), + (tpc_mask | gr_gpcs_gpccs_gpc_exception_en_gcc_f(1) | + gr_gpcs_gpccs_gpc_exception_en_gpccs_f(1) | + gr_gpcs_gpccs_gpc_exception_en_gpcmmu_f(1))); +} + +int gr_gv11b_handle_tex_exception(struct gk20a *g, u32 gpc, u32 tpc, + bool *post_event) +{ + return 0; +} + +int gr_gv11b_zbc_s_query_table(struct gk20a *g, struct gr_gk20a *gr, + struct zbc_query_params *query_params) +{ + u32 index = query_params->index_size; + + if (index >= GK20A_ZBC_TABLE_SIZE) { + nvgpu_err(g, "invalid zbc stencil table index"); + return -EINVAL; + } + query_params->depth = gr->zbc_s_tbl[index].stencil; + query_params->format = gr->zbc_s_tbl[index].format; + query_params->ref_cnt = gr->zbc_s_tbl[index].ref_cnt; + + return 0; +} + +bool gr_gv11b_add_zbc_type_s(struct gk20a *g, struct gr_gk20a *gr, + struct zbc_entry *zbc_val, int *ret_val) +{ + struct zbc_s_table *s_tbl; + u32 i; + bool added = false; + + *ret_val = -ENOMEM; + + /* search existing tables */ + for (i = 0; i < gr->max_used_s_index; i++) { + + s_tbl = &gr->zbc_s_tbl[i]; + + if (s_tbl->ref_cnt && + s_tbl->stencil == zbc_val->depth && + s_tbl->format == zbc_val->format) { + added = true; + s_tbl->ref_cnt++; + *ret_val = 0; + break; + } + } + /* add new table 
*/ + if (!added && + gr->max_used_s_index < GK20A_ZBC_TABLE_SIZE) { + + s_tbl = &gr->zbc_s_tbl[gr->max_used_s_index]; + WARN_ON(s_tbl->ref_cnt != 0); + + *ret_val = g->ops.gr.add_zbc_s(g, gr, + zbc_val, gr->max_used_s_index); + + if (!(*ret_val)) + gr->max_used_s_index++; + } + return added; +} + +int gr_gv11b_add_zbc_stencil(struct gk20a *g, struct gr_gk20a *gr, + struct zbc_entry *stencil_val, u32 index) +{ + u32 zbc_s; + + /* update l2 table */ + g->ops.ltc.set_zbc_s_entry(g, stencil_val, index); + + /* update local copy */ + gr->zbc_s_tbl[index].stencil = stencil_val->depth; + gr->zbc_s_tbl[index].format = stencil_val->format; + gr->zbc_s_tbl[index].ref_cnt++; + + gk20a_writel(g, gr_gpcs_swdx_dss_zbc_s_r(index), stencil_val->depth); + zbc_s = gk20a_readl(g, gr_gpcs_swdx_dss_zbc_s_01_to_04_format_r() + + (index & ~3)); + zbc_s &= ~(0x7f << (index % 4) * 7); + zbc_s |= stencil_val->format << (index % 4) * 7; + gk20a_writel(g, gr_gpcs_swdx_dss_zbc_s_01_to_04_format_r() + + (index & ~3), zbc_s); + + return 0; +} + +int gr_gv11b_load_stencil_default_tbl(struct gk20a *g, + struct gr_gk20a *gr) +{ + struct zbc_entry zbc_val; + u32 err; + + /* load default stencil table */ + zbc_val.type = GV11B_ZBC_TYPE_STENCIL; + + zbc_val.depth = 0x0; + zbc_val.format = ZBC_STENCIL_CLEAR_FMT_U8; + err = gr_gk20a_add_zbc(g, gr, &zbc_val); + + zbc_val.depth = 0x1; + zbc_val.format = ZBC_STENCIL_CLEAR_FMT_U8; + err |= gr_gk20a_add_zbc(g, gr, &zbc_val); + + zbc_val.depth = 0xff; + zbc_val.format = ZBC_STENCIL_CLEAR_FMT_U8; + err |= gr_gk20a_add_zbc(g, gr, &zbc_val); + + if (!err) { + gr->max_default_s_index = 3; + } else { + nvgpu_err(g, "fail to load default zbc stencil table"); + return err; + } + + return 0; +} + +int gr_gv11b_load_stencil_tbl(struct gk20a *g, struct gr_gk20a *gr) +{ + int ret; + u32 i; + + for (i = 0; i < gr->max_used_s_index; i++) { + struct zbc_s_table *s_tbl = &gr->zbc_s_tbl[i]; + struct zbc_entry zbc_val; + + zbc_val.type = GV11B_ZBC_TYPE_STENCIL; + 
zbc_val.depth = s_tbl->stencil; + zbc_val.format = s_tbl->format; + + ret = g->ops.gr.add_zbc_s(g, gr, &zbc_val, i); + if (ret) + return ret; + } + return 0; +} + +u32 gr_gv11b_pagepool_default_size(struct gk20a *g) +{ + return gr_scc_pagepool_total_pages_hwmax_value_v(); +} + +int gr_gv11b_calc_global_ctx_buffer_size(struct gk20a *g) +{ + struct gr_gk20a *gr = &g->gr; + int size; + + gr->attrib_cb_size = gr->attrib_cb_default_size; + gr->alpha_cb_size = gr->alpha_cb_default_size; + + gr->attrib_cb_size = min(gr->attrib_cb_size, + gr_gpc0_ppc0_cbm_beta_cb_size_v_f(~0) / g->gr.tpc_count); + gr->alpha_cb_size = min(gr->alpha_cb_size, + gr_gpc0_ppc0_cbm_alpha_cb_size_v_f(~0) / g->gr.tpc_count); + + size = gr->attrib_cb_size * + gr_gpc0_ppc0_cbm_beta_cb_size_v_granularity_v() * + gr->max_tpc_count; + + size += gr->alpha_cb_size * + gr_gpc0_ppc0_cbm_alpha_cb_size_v_granularity_v() * + gr->max_tpc_count; + + size = ALIGN(size, 128); + + return size; +} + +static void gr_gv11b_set_go_idle_timeout(struct gk20a *g, u32 data) +{ + gk20a_writel(g, gr_fe_go_idle_timeout_r(), data); +} + +static void gr_gv11b_set_coalesce_buffer_size(struct gk20a *g, u32 data) +{ + u32 val; + + gk20a_dbg_fn(""); + + val = gk20a_readl(g, gr_gpcs_tc_debug0_r()); + val = set_field(val, gr_gpcs_tc_debug0_limit_coalesce_buffer_size_m(), + gr_gpcs_tc_debug0_limit_coalesce_buffer_size_f(data)); + gk20a_writel(g, gr_gpcs_tc_debug0_r(), val); + + gk20a_dbg_fn("done"); +} + +static void gr_gv11b_set_tex_in_dbg(struct gk20a *g, u32 data) +{ + u32 val; + bool flag; + + gk20a_dbg_fn(""); + + val = gk20a_readl(g, gr_gpcs_tpcs_tex_in_dbg_r()); + flag = (data & NVC397_SET_TEX_IN_DBG_TSL1_RVCH_INVALIDATE) ? 
1 : 0; + val = set_field(val, gr_gpcs_tpcs_tex_in_dbg_tsl1_rvch_invalidate_m(), + gr_gpcs_tpcs_tex_in_dbg_tsl1_rvch_invalidate_f(flag)); + gk20a_writel(g, gr_gpcs_tpcs_tex_in_dbg_r(), val); + + val = gk20a_readl(g, gr_gpcs_tpcs_sm_l1tag_ctrl_r()); + flag = (data & + NVC397_SET_TEX_IN_DBG_SM_L1TAG_CTRL_CACHE_SURFACE_LD) ? 1 : 0; + val = set_field(val, gr_gpcs_tpcs_sm_l1tag_ctrl_cache_surface_ld_m(), + gr_gpcs_tpcs_sm_l1tag_ctrl_cache_surface_ld_f(flag)); + flag = (data & + NVC397_SET_TEX_IN_DBG_SM_L1TAG_CTRL_CACHE_SURFACE_ST) ? 1 : 0; + val = set_field(val, gr_gpcs_tpcs_sm_l1tag_ctrl_cache_surface_st_m(), + gr_gpcs_tpcs_sm_l1tag_ctrl_cache_surface_st_f(flag)); + gk20a_writel(g, gr_gpcs_tpcs_sm_l1tag_ctrl_r(), val); +} + +static void gr_gv11b_set_skedcheck(struct gk20a *g, u32 data) +{ + u32 reg_val; + + reg_val = gk20a_readl(g, gr_sked_hww_esr_en_r()); + + if ((data & NVC397_SET_SKEDCHECK_18_MASK) == + NVC397_SET_SKEDCHECK_18_DISABLE) { + reg_val = set_field(reg_val, + gr_sked_hww_esr_en_skedcheck18_l1_config_too_small_m(), + gr_sked_hww_esr_en_skedcheck18_l1_config_too_small_disabled_f() + ); + } else if ((data & NVC397_SET_SKEDCHECK_18_MASK) == + NVC397_SET_SKEDCHECK_18_ENABLE) { + reg_val = set_field(reg_val, + gr_sked_hww_esr_en_skedcheck18_l1_config_too_small_m(), + gr_sked_hww_esr_en_skedcheck18_l1_config_too_small_enabled_f() + ); + } + nvgpu_log_info(g, "sked_hww_esr_en = 0x%x", reg_val); + gk20a_writel(g, gr_sked_hww_esr_en_r(), reg_val); + +} + +static void gv11b_gr_set_shader_exceptions(struct gk20a *g, u32 data) +{ + gk20a_dbg_fn(""); + + if (data == NVA297_SET_SHADER_EXCEPTIONS_ENABLE_FALSE) { + gk20a_writel(g, gr_gpcs_tpcs_sms_hww_warp_esr_report_mask_r(), + 0); + gk20a_writel(g, gr_gpcs_tpcs_sms_hww_global_esr_report_mask_r(), + 0); + } else { + g->ops.gr.set_hww_esr_report_mask(g); + } +} + +int gr_gv11b_handle_sw_method(struct gk20a *g, u32 addr, + u32 class_num, u32 offset, u32 data) +{ + gk20a_dbg_fn(""); + + if (class_num == VOLTA_COMPUTE_A) { + 
switch (offset << 2) { + case NVC0C0_SET_SHADER_EXCEPTIONS: + gv11b_gr_set_shader_exceptions(g, data); + break; + case NVC3C0_SET_SKEDCHECK: + gr_gv11b_set_skedcheck(g, data); + break; + default: + goto fail; + } + } + + if (class_num == VOLTA_A) { + switch (offset << 2) { + case NVC397_SET_SHADER_EXCEPTIONS: + gv11b_gr_set_shader_exceptions(g, data); + break; + case NVC397_SET_CIRCULAR_BUFFER_SIZE: + g->ops.gr.set_circular_buffer_size(g, data); + break; + case NVC397_SET_ALPHA_CIRCULAR_BUFFER_SIZE: + g->ops.gr.set_alpha_circular_buffer_size(g, data); + break; + case NVC397_SET_GO_IDLE_TIMEOUT: + gr_gv11b_set_go_idle_timeout(g, data); + break; + case NVC097_SET_COALESCE_BUFFER_SIZE: + gr_gv11b_set_coalesce_buffer_size(g, data); + break; + case NVC397_SET_TEX_IN_DBG: + gr_gv11b_set_tex_in_dbg(g, data); + break; + case NVC397_SET_SKEDCHECK: + gr_gv11b_set_skedcheck(g, data); + break; + case NVC397_SET_BES_CROP_DEBUG3: + g->ops.gr.set_bes_crop_debug3(g, data); + break; + default: + goto fail; + } + } + return 0; + +fail: + return -EINVAL; +} + +void gr_gv11b_bundle_cb_defaults(struct gk20a *g) +{ + struct gr_gk20a *gr = &g->gr; + + gr->bundle_cb_default_size = + gr_scc_bundle_cb_size_div_256b__prod_v(); + gr->min_gpm_fifo_depth = + gr_pd_ab_dist_cfg2_state_limit_min_gpm_fifo_depths_v(); + gr->bundle_cb_token_limit = + gr_pd_ab_dist_cfg2_token_limit_init_v(); +} + +void gr_gv11b_cb_size_default(struct gk20a *g) +{ + struct gr_gk20a *gr = &g->gr; + + if (!gr->attrib_cb_default_size) + gr->attrib_cb_default_size = + gr_gpc0_ppc0_cbm_beta_cb_size_v_default_v(); + gr->alpha_cb_default_size = + gr_gpc0_ppc0_cbm_alpha_cb_size_v_default_v(); +} + +void gr_gv11b_set_alpha_circular_buffer_size(struct gk20a *g, u32 data) +{ + struct gr_gk20a *gr = &g->gr; + u32 gpc_index, ppc_index, stride, val; + u32 pd_ab_max_output; + u32 alpha_cb_size = data * 4; + + gk20a_dbg_fn(""); + + if (alpha_cb_size > gr->alpha_cb_size) + alpha_cb_size = gr->alpha_cb_size; + + gk20a_writel(g, 
gr_ds_tga_constraintlogic_alpha_r(), + (gk20a_readl(g, gr_ds_tga_constraintlogic_alpha_r()) & + ~gr_ds_tga_constraintlogic_alpha_cbsize_f(~0)) | + gr_ds_tga_constraintlogic_alpha_cbsize_f(alpha_cb_size)); + + pd_ab_max_output = alpha_cb_size * + gr_gpc0_ppc0_cbm_alpha_cb_size_v_granularity_v() / + gr_pd_ab_dist_cfg1_max_output_granularity_v(); + + gk20a_writel(g, gr_pd_ab_dist_cfg1_r(), + gr_pd_ab_dist_cfg1_max_output_f(pd_ab_max_output) | + gr_pd_ab_dist_cfg1_max_batches_init_f()); + + for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) { + stride = proj_gpc_stride_v() * gpc_index; + + for (ppc_index = 0; ppc_index < gr->gpc_ppc_count[gpc_index]; + ppc_index++) { + + val = gk20a_readl(g, gr_gpc0_ppc0_cbm_alpha_cb_size_r() + + stride + + proj_ppc_in_gpc_stride_v() * ppc_index); + + val = set_field(val, gr_gpc0_ppc0_cbm_alpha_cb_size_v_m(), + gr_gpc0_ppc0_cbm_alpha_cb_size_v_f(alpha_cb_size * + gr->pes_tpc_count[ppc_index][gpc_index])); + + gk20a_writel(g, gr_gpc0_ppc0_cbm_alpha_cb_size_r() + + stride + + proj_ppc_in_gpc_stride_v() * ppc_index, val); + } + } +} + +void gr_gv11b_set_circular_buffer_size(struct gk20a *g, u32 data) +{ + struct gr_gk20a *gr = &g->gr; + u32 gpc_index, ppc_index, stride, val; + u32 cb_size_steady = data * 4, cb_size; + + gk20a_dbg_fn(""); + + if (cb_size_steady > gr->attrib_cb_size) + cb_size_steady = gr->attrib_cb_size; + if (gk20a_readl(g, gr_gpc0_ppc0_cbm_beta_cb_size_r()) != + gk20a_readl(g, + gr_gpc0_ppc0_cbm_beta_steady_state_cb_size_r())) { + cb_size = cb_size_steady + + (gr_gpc0_ppc0_cbm_beta_cb_size_v_gfxp_v() - + gr_gpc0_ppc0_cbm_beta_cb_size_v_default_v()); + } else { + cb_size = cb_size_steady; + } + + gk20a_writel(g, gr_ds_tga_constraintlogic_beta_r(), + (gk20a_readl(g, gr_ds_tga_constraintlogic_beta_r()) & + ~gr_ds_tga_constraintlogic_beta_cbsize_f(~0)) | + gr_ds_tga_constraintlogic_beta_cbsize_f(cb_size_steady)); + + for (gpc_index = 0; gpc_index < gr->gpc_count; gpc_index++) { + stride = proj_gpc_stride_v() * 
gpc_index; + + for (ppc_index = 0; ppc_index < gr->gpc_ppc_count[gpc_index]; + ppc_index++) { + + val = gk20a_readl(g, gr_gpc0_ppc0_cbm_beta_cb_size_r() + + stride + + proj_ppc_in_gpc_stride_v() * ppc_index); + + val = set_field(val, + gr_gpc0_ppc0_cbm_beta_cb_size_v_m(), + gr_gpc0_ppc0_cbm_beta_cb_size_v_f(cb_size * + gr->pes_tpc_count[ppc_index][gpc_index])); + + gk20a_writel(g, gr_gpc0_ppc0_cbm_beta_cb_size_r() + + stride + + proj_ppc_in_gpc_stride_v() * ppc_index, val); + + gk20a_writel(g, proj_ppc_in_gpc_stride_v() * ppc_index + + gr_gpc0_ppc0_cbm_beta_steady_state_cb_size_r() + + stride, + gr_gpc0_ppc0_cbm_beta_steady_state_cb_size_v_f( + cb_size_steady)); + + val = gk20a_readl(g, gr_gpcs_swdx_tc_beta_cb_size_r( + ppc_index + gpc_index)); + + val = set_field(val, + gr_gpcs_swdx_tc_beta_cb_size_v_m(), + gr_gpcs_swdx_tc_beta_cb_size_v_f( + cb_size_steady * + gr->gpc_ppc_count[gpc_index])); + + gk20a_writel(g, gr_gpcs_swdx_tc_beta_cb_size_r( + ppc_index + gpc_index), val); + } + } +} + +int gr_gv11b_alloc_buffer(struct vm_gk20a *vm, size_t size, + struct nvgpu_mem *mem) +{ + int err; + + gk20a_dbg_fn(""); + + err = nvgpu_dma_alloc_sys(vm->mm->g, size, mem); + if (err) + return err; + + mem->gpu_va = nvgpu_gmmu_map(vm, + mem, + size, + NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE, + gk20a_mem_flag_none, + false, + mem->aperture); + + if (!mem->gpu_va) { + err = -ENOMEM; + goto fail_free; + } + + return 0; + +fail_free: + nvgpu_dma_free(vm->mm->g, mem); + return err; +} + +static void gr_gv11b_dump_gr_per_sm_regs(struct gk20a *g, + struct gk20a_debug_output *o, + u32 gpc, u32 tpc, u32 sm, u32 offset) +{ + + gk20a_debug_output(o, + "NV_PGRAPH_PRI_GPC%d_TPC%d_SM%d_HWW_WARP_ESR: 0x%x\n", + gpc, tpc, sm, gk20a_readl(g, + gr_gpc0_tpc0_sm0_hww_warp_esr_r() + offset)); + + gk20a_debug_output(o, + "NV_PGRAPH_PRI_GPC%d_TPC%d_SM%d_HWW_WARP_ESR_REPORT_MASK: 0x%x\n", + gpc, tpc, sm, gk20a_readl(g, + gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_r() + offset)); + + gk20a_debug_output(o, + 
"NV_PGRAPH_PRI_GPC%d_TPC%d_SM%d_HWW_GLOBAL_ESR: 0x%x\n", + gpc, tpc, sm, gk20a_readl(g, + gr_gpc0_tpc0_sm0_hww_global_esr_r() + offset)); + + gk20a_debug_output(o, + "NV_PGRAPH_PRI_GPC%d_TPC%d_SM%d_HWW_GLOBAL_ESR_REPORT_MASK: 0x%x\n", + gpc, tpc, sm, gk20a_readl(g, + gr_gpc0_tpc0_sm0_hww_global_esr_report_mask_r() + offset)); + + gk20a_debug_output(o, + "NV_PGRAPH_PRI_GPC%d_TPC%d_SM%d_DBGR_CONTROL0: 0x%x\n", + gpc, tpc, sm, gk20a_readl(g, + gr_gpc0_tpc0_sm0_dbgr_control0_r() + offset)); + + gk20a_debug_output(o, + "NV_PGRAPH_PRI_GPC%d_TPC%d_SM%d_DBGR_STATUS0: 0x%x\n", + gpc, tpc, sm, gk20a_readl(g, + gr_gpc0_tpc0_sm0_dbgr_status0_r() + offset)); +} + +static int gr_gv11b_dump_gr_sm_regs(struct gk20a *g, + struct gk20a_debug_output *o) +{ + u32 gpc, tpc, sm, sm_per_tpc; + u32 gpc_offset, tpc_offset, offset; + + gk20a_debug_output(o, + "NV_PGRAPH_PRI_GPCS_TPCS_SMS_HWW_GLOBAL_ESR_REPORT_MASK: 0x%x\n", + gk20a_readl(g, + gr_gpcs_tpcs_sms_hww_global_esr_report_mask_r())); + gk20a_debug_output(o, + "NV_PGRAPH_PRI_GPCS_TPCS_SMS_HWW_WARP_ESR_REPORT_MASK: 0x%x\n", + gk20a_readl(g, gr_gpcs_tpcs_sms_hww_warp_esr_report_mask_r())); + gk20a_debug_output(o, + "NV_PGRAPH_PRI_GPCS_TPCS_SMS_HWW_GLOBAL_ESR: 0x%x\n", + gk20a_readl(g, gr_gpcs_tpcs_sms_hww_global_esr_r())); + gk20a_debug_output(o, + "NV_PGRAPH_PRI_GPCS_TPCS_SMS_DBGR_CONTROL0: 0x%x\n", + gk20a_readl(g, gr_gpcs_tpcs_sms_dbgr_control0_r())); + gk20a_debug_output(o, + "NV_PGRAPH_PRI_GPCS_TPCS_SMS_DBGR_STATUS0: 0x%x\n", + gk20a_readl(g, gr_gpcs_tpcs_sms_dbgr_status0_r())); + gk20a_debug_output(o, + "NV_PGRAPH_PRI_GPCS_TPCS_SMS_DBGR_BPT_PAUSE_MASK_0: 0x%x\n", + gk20a_readl(g, gr_gpcs_tpcs_sms_dbgr_bpt_pause_mask_0_r())); + gk20a_debug_output(o, + "NV_PGRAPH_PRI_GPCS_TPCS_SMS_DBGR_BPT_PAUSE_MASK_1: 0x%x\n", + gk20a_readl(g, gr_gpcs_tpcs_sms_dbgr_bpt_pause_mask_1_r())); + + sm_per_tpc = nvgpu_get_litter_value(g, GPU_LIT_NUM_SM_PER_TPC); + for (gpc = 0; gpc < g->gr.gpc_count; gpc++) { + gpc_offset = gk20a_gr_gpc_offset(g, gpc); 
+ + for (tpc = 0; tpc < g->gr.tpc_count; tpc++) { + tpc_offset = gk20a_gr_tpc_offset(g, tpc); + + for (sm = 0; sm < sm_per_tpc; sm++) { + offset = gpc_offset + tpc_offset + + gv11b_gr_sm_offset(g, sm); + + gr_gv11b_dump_gr_per_sm_regs(g, o, + gpc, tpc, sm, offset); + } + } + } + + return 0; +} + +int gr_gv11b_dump_gr_status_regs(struct gk20a *g, + struct gk20a_debug_output *o) +{ + struct gr_gk20a *gr = &g->gr; + u32 gr_engine_id; + + gr_engine_id = gk20a_fifo_get_gr_engine_id(g); + + gk20a_debug_output(o, "NV_PGRAPH_STATUS: 0x%x\n", + gk20a_readl(g, gr_status_r())); + gk20a_debug_output(o, "NV_PGRAPH_STATUS1: 0x%x\n", + gk20a_readl(g, gr_status_1_r())); + gk20a_debug_output(o, "NV_PGRAPH_STATUS2: 0x%x\n", + gk20a_readl(g, gr_status_2_r())); + gk20a_debug_output(o, "NV_PGRAPH_ENGINE_STATUS: 0x%x\n", + gk20a_readl(g, gr_engine_status_r())); + gk20a_debug_output(o, "NV_PGRAPH_GRFIFO_STATUS : 0x%x\n", + gk20a_readl(g, gr_gpfifo_status_r())); + gk20a_debug_output(o, "NV_PGRAPH_GRFIFO_CONTROL : 0x%x\n", + gk20a_readl(g, gr_gpfifo_ctl_r())); + gk20a_debug_output(o, "NV_PGRAPH_PRI_FECS_HOST_INT_STATUS : 0x%x\n", + gk20a_readl(g, gr_fecs_host_int_status_r())); + gk20a_debug_output(o, "NV_PGRAPH_EXCEPTION : 0x%x\n", + gk20a_readl(g, gr_exception_r())); + gk20a_debug_output(o, "NV_PGRAPH_FECS_INTR : 0x%x\n", + gk20a_readl(g, gr_fecs_intr_r())); + gk20a_debug_output(o, "NV_PFIFO_ENGINE_STATUS(GR) : 0x%x\n", + gk20a_readl(g, fifo_engine_status_r(gr_engine_id))); + gk20a_debug_output(o, "NV_PGRAPH_ACTIVITY0: 0x%x\n", + gk20a_readl(g, gr_activity_0_r())); + gk20a_debug_output(o, "NV_PGRAPH_ACTIVITY1: 0x%x\n", + gk20a_readl(g, gr_activity_1_r())); + gk20a_debug_output(o, "NV_PGRAPH_ACTIVITY2: 0x%x\n", + gk20a_readl(g, gr_activity_2_r())); + gk20a_debug_output(o, "NV_PGRAPH_ACTIVITY4: 0x%x\n", + gk20a_readl(g, gr_activity_4_r())); + gk20a_debug_output(o, "NV_PGRAPH_PRI_SKED_ACTIVITY: 0x%x\n", + gk20a_readl(g, gr_pri_sked_activity_r())); + gk20a_debug_output(o, 
"NV_PGRAPH_PRI_GPC0_GPCCS_GPC_ACTIVITY0: 0x%x\n", + gk20a_readl(g, gr_pri_gpc0_gpccs_gpc_activity0_r())); + gk20a_debug_output(o, "NV_PGRAPH_PRI_GPC0_GPCCS_GPC_ACTIVITY1: 0x%x\n", + gk20a_readl(g, gr_pri_gpc0_gpccs_gpc_activity1_r())); + gk20a_debug_output(o, "NV_PGRAPH_PRI_GPC0_GPCCS_GPC_ACTIVITY2: 0x%x\n", + gk20a_readl(g, gr_pri_gpc0_gpccs_gpc_activity2_r())); + gk20a_debug_output(o, "NV_PGRAPH_PRI_GPC0_GPCCS_GPC_ACTIVITY3: 0x%x\n", + gk20a_readl(g, gr_pri_gpc0_gpccs_gpc_activity3_r())); + gk20a_debug_output(o, "NV_PGRAPH_PRI_GPC0_TPC0_TPCCS_TPC_ACTIVITY0: 0x%x\n", + gk20a_readl(g, gr_pri_gpc0_tpc0_tpccs_tpc_activity_0_r())); + if (gr->gpc_tpc_count[0] == 2) + gk20a_debug_output(o, "NV_PGRAPH_PRI_GPC0_TPC1_TPCCS_TPC_ACTIVITY0: 0x%x\n", + gk20a_readl(g, gr_pri_gpc0_tpc1_tpccs_tpc_activity_0_r())); + gk20a_debug_output(o, "NV_PGRAPH_PRI_GPC0_TPCS_TPCCS_TPC_ACTIVITY0: 0x%x\n", + gk20a_readl(g, gr_pri_gpc0_tpcs_tpccs_tpc_activity_0_r())); + gk20a_debug_output(o, "NV_PGRAPH_PRI_GPCS_GPCCS_GPC_ACTIVITY0: 0x%x\n", + gk20a_readl(g, gr_pri_gpcs_gpccs_gpc_activity_0_r())); + gk20a_debug_output(o, "NV_PGRAPH_PRI_GPCS_GPCCS_GPC_ACTIVITY1: 0x%x\n", + gk20a_readl(g, gr_pri_gpcs_gpccs_gpc_activity_1_r())); + gk20a_debug_output(o, "NV_PGRAPH_PRI_GPCS_GPCCS_GPC_ACTIVITY2: 0x%x\n", + gk20a_readl(g, gr_pri_gpcs_gpccs_gpc_activity_2_r())); + gk20a_debug_output(o, "NV_PGRAPH_PRI_GPCS_GPCCS_GPC_ACTIVITY3: 0x%x\n", + gk20a_readl(g, gr_pri_gpcs_gpccs_gpc_activity_3_r())); + gk20a_debug_output(o, "NV_PGRAPH_PRI_GPCS_TPC0_TPCCS_TPC_ACTIVITY0: 0x%x\n", + gk20a_readl(g, gr_pri_gpcs_tpc0_tpccs_tpc_activity_0_r())); + if (gr->gpc_tpc_count[0] == 2) + gk20a_debug_output(o, "NV_PGRAPH_PRI_GPCS_TPC1_TPCCS_TPC_ACTIVITY0: 0x%x\n", + gk20a_readl(g, gr_pri_gpcs_tpc1_tpccs_tpc_activity_0_r())); + gk20a_debug_output(o, "NV_PGRAPH_PRI_GPCS_TPCS_TPCCS_TPC_ACTIVITY0: 0x%x\n", + gk20a_readl(g, gr_pri_gpcs_tpcs_tpccs_tpc_activity_0_r())); + gk20a_debug_output(o, "NV_PGRAPH_PRI_BE0_BECS_BE_ACTIVITY0: 
0x%x\n", + gk20a_readl(g, gr_pri_be0_becs_be_activity0_r())); + gk20a_debug_output(o, "NV_PGRAPH_PRI_BE1_BECS_BE_ACTIVITY0: 0x%x\n", + gk20a_readl(g, gr_pri_be1_becs_be_activity0_r())); + gk20a_debug_output(o, "NV_PGRAPH_PRI_BES_BECS_BE_ACTIVITY0: 0x%x\n", + gk20a_readl(g, gr_pri_bes_becs_be_activity0_r())); + gk20a_debug_output(o, "NV_PGRAPH_PRI_DS_MPIPE_STATUS: 0x%x\n", + gk20a_readl(g, gr_pri_ds_mpipe_status_r())); + gk20a_debug_output(o, "NV_PGRAPH_PRI_FE_GO_IDLE_TIMEOUT : 0x%x\n", + gk20a_readl(g, gr_fe_go_idle_timeout_r())); + gk20a_debug_output(o, "NV_PGRAPH_PRI_FE_GO_IDLE_INFO : 0x%x\n", + gk20a_readl(g, gr_pri_fe_go_idle_info_r())); + gk20a_debug_output(o, "NV_PGRAPH_PRI_GPC0_TPC0_TEX_M_TEX_SUBUNITS_STATUS: 0x%x\n", + gk20a_readl(g, gr_pri_gpc0_tpc0_tex_m_tex_subunits_status_r())); + gk20a_debug_output(o, "NV_PGRAPH_PRI_CWD_FS: 0x%x\n", + gk20a_readl(g, gr_cwd_fs_r())); + gk20a_debug_output(o, "NV_PGRAPH_PRI_FE_TPC_FS(0): 0x%x\n", + gk20a_readl(g, gr_fe_tpc_fs_r(0))); + gk20a_debug_output(o, "NV_PGRAPH_PRI_CWD_GPC_TPC_ID: 0x%x\n", + gk20a_readl(g, gr_cwd_gpc_tpc_id_r(0))); + gk20a_debug_output(o, "NV_PGRAPH_PRI_CWD_SM_ID(0): 0x%x\n", + gk20a_readl(g, gr_cwd_sm_id_r(0))); + gk20a_debug_output(o, "NV_PGRAPH_PRI_FECS_CTXSW_STATUS_FE_0: 0x%x\n", + gk20a_readl(g, gr_fecs_ctxsw_status_fe_0_r())); + gk20a_debug_output(o, "NV_PGRAPH_PRI_FECS_CTXSW_STATUS_1: 0x%x\n", + gk20a_readl(g, gr_fecs_ctxsw_status_1_r())); + gk20a_debug_output(o, "NV_PGRAPH_PRI_GPC0_GPCCS_CTXSW_STATUS_GPC_0: 0x%x\n", + gk20a_readl(g, gr_gpc0_gpccs_ctxsw_status_gpc_0_r())); + gk20a_debug_output(o, "NV_PGRAPH_PRI_GPC0_GPCCS_CTXSW_STATUS_1: 0x%x\n", + gk20a_readl(g, gr_gpc0_gpccs_ctxsw_status_1_r())); + gk20a_debug_output(o, "NV_PGRAPH_PRI_FECS_CTXSW_IDLESTATE : 0x%x\n", + gk20a_readl(g, gr_fecs_ctxsw_idlestate_r())); + gk20a_debug_output(o, "NV_PGRAPH_PRI_GPC0_GPCCS_CTXSW_IDLESTATE : 0x%x\n", + gk20a_readl(g, gr_gpc0_gpccs_ctxsw_idlestate_r())); + gk20a_debug_output(o, 
"NV_PGRAPH_PRI_FECS_CURRENT_CTX : 0x%x\n", + gk20a_readl(g, gr_fecs_current_ctx_r())); + gk20a_debug_output(o, "NV_PGRAPH_PRI_FECS_NEW_CTX : 0x%x\n", + gk20a_readl(g, gr_fecs_new_ctx_r())); + gk20a_debug_output(o, "NV_PGRAPH_PRI_FECS_HOST_INT_ENABLE : 0x%x\n", + gk20a_readl(g, gr_fecs_host_int_enable_r())); + gk20a_debug_output(o, "NV_PGRAPH_PRI_FECS_HOST_INT_STATUS : 0x%x\n", + gk20a_readl(g, gr_fecs_host_int_status_r())); + gk20a_debug_output(o, "NV_PGRAPH_PRI_BE0_CROP_STATUS1 : 0x%x\n", + gk20a_readl(g, gr_pri_be0_crop_status1_r())); + gk20a_debug_output(o, "NV_PGRAPH_PRI_BES_CROP_STATUS1 : 0x%x\n", + gk20a_readl(g, gr_pri_bes_crop_status1_r())); + gk20a_debug_output(o, "NV_PGRAPH_PRI_BE0_ZROP_STATUS : 0x%x\n", + gk20a_readl(g, gr_pri_be0_zrop_status_r())); + gk20a_debug_output(o, "NV_PGRAPH_PRI_BE0_ZROP_STATUS2 : 0x%x\n", + gk20a_readl(g, gr_pri_be0_zrop_status2_r())); + gk20a_debug_output(o, "NV_PGRAPH_PRI_BES_ZROP_STATUS : 0x%x\n", + gk20a_readl(g, gr_pri_bes_zrop_status_r())); + gk20a_debug_output(o, "NV_PGRAPH_PRI_BES_ZROP_STATUS2 : 0x%x\n", + gk20a_readl(g, gr_pri_bes_zrop_status2_r())); + gk20a_debug_output(o, "NV_PGRAPH_PRI_BE0_BECS_BE_EXCEPTION: 0x%x\n", + gk20a_readl(g, gr_pri_be0_becs_be_exception_r())); + gk20a_debug_output(o, "NV_PGRAPH_PRI_BE0_BECS_BE_EXCEPTION_EN: 0x%x\n", + gk20a_readl(g, gr_pri_be0_becs_be_exception_en_r())); + gk20a_debug_output(o, "NV_PGRAPH_PRI_GPC0_GPCCS_GPC_EXCEPTION: 0x%x\n", + gk20a_readl(g, gr_pri_gpc0_gpccs_gpc_exception_r())); + gk20a_debug_output(o, "NV_PGRAPH_PRI_GPC0_GPCCS_GPC_EXCEPTION_EN: 0x%x\n", + gk20a_readl(g, gr_pri_gpc0_gpccs_gpc_exception_en_r())); + gk20a_debug_output(o, "NV_PGRAPH_PRI_GPC0_TPC0_TPCCS_TPC_EXCEPTION: 0x%x\n", + gk20a_readl(g, gr_pri_gpc0_tpc0_tpccs_tpc_exception_r())); + gk20a_debug_output(o, "NV_PGRAPH_PRI_GPC0_TPC0_TPCCS_TPC_EXCEPTION_EN: 0x%x\n", + gk20a_readl(g, gr_pri_gpc0_tpc0_tpccs_tpc_exception_en_r())); + + gr_gv11b_dump_gr_sm_regs(g, o); + + return 0; +} + +static bool 
gr_activity_empty_or_preempted(u32 val) +{ + while (val) { + u32 v = val & 7; + if (v != gr_activity_4_gpc0_empty_v() && + v != gr_activity_4_gpc0_preempted_v()) + return false; + val >>= 3; + } + + return true; +} + +int gr_gv11b_wait_empty(struct gk20a *g, unsigned long duration_ms, + u32 expect_delay) +{ + u32 delay = expect_delay; + bool gr_enabled; + bool ctxsw_active; + bool gr_busy; + u32 gr_status; + u32 activity0, activity1, activity2, activity4; + struct nvgpu_timeout timeout; + + gk20a_dbg_fn(""); + + nvgpu_timeout_init(g, &timeout, duration_ms, NVGPU_TIMER_CPU_TIMER); + + do { + /* fmodel: host gets fifo_engine_status(gr) from gr + only when gr_status is read */ + gr_status = gk20a_readl(g, gr_status_r()); + + gr_enabled = gk20a_readl(g, mc_enable_r()) & + mc_enable_pgraph_enabled_f(); + + ctxsw_active = gr_status & 1<<7; + + activity0 = gk20a_readl(g, gr_activity_0_r()); + activity1 = gk20a_readl(g, gr_activity_1_r()); + activity2 = gk20a_readl(g, gr_activity_2_r()); + activity4 = gk20a_readl(g, gr_activity_4_r()); + + gr_busy = !(gr_activity_empty_or_preempted(activity0) && + gr_activity_empty_or_preempted(activity1) && + activity2 == 0 && + gr_activity_empty_or_preempted(activity4)); + + if (!gr_enabled || (!gr_busy && !ctxsw_active)) { + gk20a_dbg_fn("done"); + return 0; + } + + usleep_range(delay, delay * 2); + delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX); + + } while (!nvgpu_timeout_expired(&timeout)); + + nvgpu_err(g, + "timeout, ctxsw busy : %d, gr busy : %d, %08x, %08x, %08x, %08x", + ctxsw_active, gr_busy, activity0, activity1, activity2, activity4); + + return -EAGAIN; +} + +void gr_gv11b_commit_global_attrib_cb(struct gk20a *g, + struct channel_ctx_gk20a *ch_ctx, + u64 addr, bool patch) +{ + struct gr_ctx_desc *gr_ctx = ch_ctx->gr_ctx; + int attrBufferSize; + + if (gr_ctx->t18x.preempt_ctxsw_buffer.gpu_va) + attrBufferSize = gr_ctx->t18x.betacb_ctxsw_buffer.size; + else + attrBufferSize = g->ops.gr.calc_global_ctx_buffer_size(g); + + 
attrBufferSize /= gr_gpcs_tpcs_tex_rm_cb_1_size_div_128b_granularity_f(); + + gr_gm20b_commit_global_attrib_cb(g, ch_ctx, addr, patch); + + gr_gk20a_ctx_patch_write(g, ch_ctx, gr_gpcs_tpcs_mpc_vtg_cb_global_base_addr_r(), + gr_gpcs_tpcs_mpc_vtg_cb_global_base_addr_v_f(addr) | + gr_gpcs_tpcs_mpc_vtg_cb_global_base_addr_valid_true_f(), patch); + + gr_gk20a_ctx_patch_write(g, ch_ctx, gr_gpcs_tpcs_tex_rm_cb_0_r(), + gr_gpcs_tpcs_tex_rm_cb_0_base_addr_43_12_f(addr), patch); + + gr_gk20a_ctx_patch_write(g, ch_ctx, gr_gpcs_tpcs_tex_rm_cb_1_r(), + gr_gpcs_tpcs_tex_rm_cb_1_size_div_128b_f(attrBufferSize) | + gr_gpcs_tpcs_tex_rm_cb_1_valid_true_f(), patch); +} + +void gr_gv11b_set_gpc_tpc_mask(struct gk20a *g, u32 gpc_index) +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) + tegra_fuse_writel(0x1, FUSE_FUSEBYPASS_0); + tegra_fuse_writel(0x0, FUSE_WRITE_ACCESS_SW_0); +#else + tegra_fuse_control_write(0x1, FUSE_FUSEBYPASS_0); + tegra_fuse_control_write(0x0, FUSE_WRITE_ACCESS_SW_0); +#endif + + if (g->gr.gpc_tpc_mask[gpc_index] == 0x1) + tegra_fuse_writel(0x2, FUSE_OPT_GPU_TPC0_DISABLE_0); + else if (g->gr.gpc_tpc_mask[gpc_index] == 0x2) + tegra_fuse_writel(0x1, FUSE_OPT_GPU_TPC0_DISABLE_0); + else + tegra_fuse_writel(0x0, FUSE_OPT_GPU_TPC0_DISABLE_0); +} + +void gr_gv11b_get_access_map(struct gk20a *g, + u32 **whitelist, int *num_entries) +{ + static u32 wl_addr_gv11b[] = { + /* this list must be sorted (low to high) */ + 0x404468, /* gr_pri_mme_max_instructions */ + 0x418300, /* gr_pri_gpcs_rasterarb_line_class */ + 0x418800, /* gr_pri_gpcs_setup_debug */ + 0x418e00, /* gr_pri_gpcs_swdx_config */ + 0x418e40, /* gr_pri_gpcs_swdx_tc_bundle_ctrl */ + 0x418e44, /* gr_pri_gpcs_swdx_tc_bundle_ctrl */ + 0x418e48, /* gr_pri_gpcs_swdx_tc_bundle_ctrl */ + 0x418e4c, /* gr_pri_gpcs_swdx_tc_bundle_ctrl */ + 0x418e50, /* gr_pri_gpcs_swdx_tc_bundle_ctrl */ + 0x418e58, /* gr_pri_gpcs_swdx_tc_bundle_addr */ + 0x418e5c, /* gr_pri_gpcs_swdx_tc_bundle_addr */ + 0x418e60, /* 
gr_pri_gpcs_swdx_tc_bundle_addr */ + 0x418e64, /* gr_pri_gpcs_swdx_tc_bundle_addr */ + 0x418e68, /* gr_pri_gpcs_swdx_tc_bundle_addr */ + 0x418e6c, /* gr_pri_gpcs_swdx_tc_bundle_addr */ + 0x418e70, /* gr_pri_gpcs_swdx_tc_bundle_addr */ + 0x418e74, /* gr_pri_gpcs_swdx_tc_bundle_addr */ + 0x418e78, /* gr_pri_gpcs_swdx_tc_bundle_addr */ + 0x418e7c, /* gr_pri_gpcs_swdx_tc_bundle_addr */ + 0x418e80, /* gr_pri_gpcs_swdx_tc_bundle_addr */ + 0x418e84, /* gr_pri_gpcs_swdx_tc_bundle_addr */ + 0x418e88, /* gr_pri_gpcs_swdx_tc_bundle_addr */ + 0x418e8c, /* gr_pri_gpcs_swdx_tc_bundle_addr */ + 0x418e90, /* gr_pri_gpcs_swdx_tc_bundle_addr */ + 0x418e94, /* gr_pri_gpcs_swdx_tc_bundle_addr */ + 0x419864, /* gr_pri_gpcs_tpcs_pe_l2_evict_policy */ + 0x419a04, /* gr_pri_gpcs_tpcs_tex_lod_dbg */ + 0x419a08, /* gr_pri_gpcs_tpcs_tex_samp_dbg */ + 0x419e84, /* gr_pri_gpcs_tpcs_sms_dbgr_control0 */ + 0x419ba4, /* gr_pri_gpcs_tpcs_sm_disp_ctrl */ + }; + + *whitelist = wl_addr_gv11b; + *num_entries = ARRAY_SIZE(wl_addr_gv11b); +} + +/* @brief pre-process work on the SM exceptions to determine if we clear them or not. 
+ * + * On Pascal, if we are in CILP preemtion mode, preempt the channel and handle errors with special processing + */ +int gr_gv11b_pre_process_sm_exception(struct gk20a *g, + u32 gpc, u32 tpc, u32 sm, u32 global_esr, u32 warp_esr, + bool sm_debugger_attached, struct channel_gk20a *fault_ch, + bool *early_exit, bool *ignore_debugger) +{ + int ret; + bool cilp_enabled = false; + u32 global_mask = 0, dbgr_control0, global_esr_copy; + u32 offset = gk20a_gr_gpc_offset(g, gpc) + + gk20a_gr_tpc_offset(g, tpc) + + gv11b_gr_sm_offset(g, sm); + + *early_exit = false; + *ignore_debugger = false; + + if (fault_ch) + cilp_enabled = (fault_ch->ch_ctx.gr_ctx->compute_preempt_mode == + NVGPU_PREEMPTION_MODE_COMPUTE_CILP); + + gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, + "SM Exception received on gpc %d tpc %d sm %d = 0x%08x", + gpc, tpc, sm, global_esr); + + if (cilp_enabled && sm_debugger_attached) { + if (global_esr & gr_gpc0_tpc0_sm0_hww_global_esr_bpt_int_pending_f()) + gk20a_writel(g, gr_gpc0_tpc0_sm0_hww_global_esr_r() + offset, + gr_gpc0_tpc0_sm0_hww_global_esr_bpt_int_pending_f()); + + if (global_esr & gr_gpc0_tpc0_sm0_hww_global_esr_single_step_complete_pending_f()) + gk20a_writel(g, gr_gpc0_tpc0_sm0_hww_global_esr_r() + offset, + gr_gpc0_tpc0_sm0_hww_global_esr_single_step_complete_pending_f()); + + global_mask = gr_gpc0_tpc0_sm0_hww_global_esr_multiple_warp_errors_pending_f() | + gr_gpc0_tpc0_sm0_hww_global_esr_bpt_pause_pending_f(); + + if (warp_esr != 0 || (global_esr & global_mask) != 0) { + *ignore_debugger = true; + + gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, + "CILP: starting wait for LOCKED_DOWN on " + "gpc %d tpc %d sm %d", + gpc, tpc, sm); + + if (gk20a_dbg_gpu_broadcast_stop_trigger(fault_ch)) { + gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, + "CILP: Broadcasting STOP_TRIGGER from " + "gpc %d tpc %d sm %d", + gpc, tpc, sm); + g->ops.gr.suspend_all_sms(g, + global_mask, false); + + gk20a_dbg_gpu_clear_broadcast_stop_trigger(fault_ch); + } else { + gk20a_dbg(gpu_dbg_fn 
| gpu_dbg_gpu_dbg, + "CILP: STOP_TRIGGER from " + "gpc %d tpc %d sm %d", + gpc, tpc, sm); + g->ops.gr.suspend_single_sm(g, + gpc, tpc, sm, global_mask, true); + } + + /* reset the HWW errors after locking down */ + global_esr_copy = g->ops.gr.get_sm_hww_global_esr(g, + gpc, tpc, sm); + g->ops.gr.clear_sm_hww(g, + gpc, tpc, sm, global_esr_copy); + gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, + "CILP: HWWs cleared for " + "gpc %d tpc %d sm %d", + gpc, tpc, sm); + + gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, "CILP: Setting CILP preempt pending\n"); + ret = gr_gp10b_set_cilp_preempt_pending(g, fault_ch); + if (ret) { + nvgpu_err(g, "CILP: error while setting CILP preempt pending!"); + return ret; + } + + dbgr_control0 = gk20a_readl(g, gr_gpc0_tpc0_sm0_dbgr_control0_r() + offset); + if (dbgr_control0 & gr_gpc0_tpc0_sm0_dbgr_control0_single_step_mode_enable_f()) { + gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, + "CILP: clearing SINGLE_STEP_MODE " + "before resume for gpc %d tpc %d sm %d", + gpc, tpc, sm); + dbgr_control0 = set_field(dbgr_control0, + gr_gpc0_tpc0_sm0_dbgr_control0_single_step_mode_m(), + gr_gpc0_tpc0_sm0_dbgr_control0_single_step_mode_disable_f()); + gk20a_writel(g, gr_gpc0_tpc0_sm0_dbgr_control0_r() + offset, dbgr_control0); + } + + gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, + "CILP: resume for gpc %d tpc %d sm %d", + gpc, tpc, sm); + g->ops.gr.resume_single_sm(g, gpc, tpc, sm); + + *ignore_debugger = true; + gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg, + "CILP: All done on gpc %d, tpc %d sm %d", + gpc, tpc, sm); + } + + *early_exit = true; + } + return 0; +} + +static void gr_gv11b_handle_fecs_ecc_error(struct gk20a *g, u32 intr) +{ + u32 ecc_status, ecc_addr, corrected_cnt, uncorrected_cnt; + u32 corrected_delta, uncorrected_delta; + u32 corrected_overflow, uncorrected_overflow; + + if (intr & (gr_fecs_host_int_status_ecc_uncorrected_m() | + gr_fecs_host_int_status_ecc_corrected_m())) { + ecc_status = gk20a_readl(g, gr_fecs_falcon_ecc_status_r()); + ecc_addr = 
gk20a_readl(g, + gr_fecs_falcon_ecc_address_r()); + corrected_cnt = gk20a_readl(g, + gr_fecs_falcon_ecc_corrected_err_count_r()); + uncorrected_cnt = gk20a_readl(g, + gr_fecs_falcon_ecc_uncorrected_err_count_r()); + + corrected_delta = + gr_fecs_falcon_ecc_corrected_err_count_total_v( + corrected_cnt); + uncorrected_delta = + gr_fecs_falcon_ecc_uncorrected_err_count_total_v( + uncorrected_cnt); + + corrected_overflow = ecc_status & + gr_fecs_falcon_ecc_status_corrected_err_total_counter_overflow_m(); + uncorrected_overflow = ecc_status & + gr_fecs_falcon_ecc_status_uncorrected_err_total_counter_overflow_m(); + + /* clear the interrupt */ + if ((corrected_delta > 0) || corrected_overflow) + gk20a_writel(g, + gr_fecs_falcon_ecc_corrected_err_count_r(), 0); + if ((uncorrected_delta > 0) || uncorrected_overflow) + gk20a_writel(g, + gr_fecs_falcon_ecc_uncorrected_err_count_r(), + 0); + + + /* clear the interrupt */ + gk20a_writel(g, gr_fecs_falcon_ecc_uncorrected_err_count_r(), + 0); + gk20a_writel(g, gr_fecs_falcon_ecc_corrected_err_count_r(), 0); + + /* clear the interrupt */ + gk20a_writel(g, gr_fecs_falcon_ecc_status_r(), + gr_fecs_falcon_ecc_status_reset_task_f()); + + g->ecc.gr.t19x.fecs_corrected_err_count.counters[0] += + corrected_delta; + g->ecc.gr.t19x.fecs_uncorrected_err_count.counters[0] += + uncorrected_delta; + + nvgpu_log(g, gpu_dbg_intr, + "fecs ecc interrupt intr: 0x%x", intr); + + if (ecc_status & + gr_fecs_falcon_ecc_status_corrected_err_imem_m()) + nvgpu_log(g, gpu_dbg_intr, "imem ecc error corrected"); + if (ecc_status & + gr_fecs_falcon_ecc_status_uncorrected_err_imem_m()) + nvgpu_log(g, gpu_dbg_intr, + "imem ecc error uncorrected"); + if (ecc_status & + gr_fecs_falcon_ecc_status_corrected_err_dmem_m()) + nvgpu_log(g, gpu_dbg_intr, "dmem ecc error corrected"); + if (ecc_status & + gr_fecs_falcon_ecc_status_uncorrected_err_dmem_m()) + nvgpu_log(g, gpu_dbg_intr, + "dmem ecc error uncorrected"); + if (corrected_overflow || uncorrected_overflow) + 
nvgpu_info(g, "fecs ecc counter overflow!"); + + nvgpu_log(g, gpu_dbg_intr, + "ecc error row address: 0x%x", + gr_fecs_falcon_ecc_address_row_address_v(ecc_addr)); + + nvgpu_log(g, gpu_dbg_intr, + "ecc error count corrected: %d, uncorrected %d", + g->ecc.gr.t19x.fecs_corrected_err_count.counters[0], + g->ecc.gr.t19x.fecs_uncorrected_err_count.counters[0]); + } +} + +int gr_gv11b_handle_fecs_error(struct gk20a *g, + struct channel_gk20a *__ch, + struct gr_gk20a_isr_data *isr_data) +{ + u32 gr_fecs_intr = gk20a_readl(g, gr_fecs_host_int_status_r()); + int ret; + + gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg | gpu_dbg_intr, ""); + + ret = gr_gp10b_handle_fecs_error(g, __ch, isr_data); + + /* Handle ECC errors */ + gr_gv11b_handle_fecs_ecc_error(g, gr_fecs_intr); + + return ret; +} + +int gr_gv11b_setup_rop_mapping(struct gk20a *g, struct gr_gk20a *gr) +{ + u32 map; + u32 i, j, mapregs; + u32 num_gpcs = nvgpu_get_litter_value(g, GPU_LIT_NUM_GPCS); + u32 num_tpc_per_gpc = nvgpu_get_litter_value(g, + GPU_LIT_NUM_TPC_PER_GPC); + + gk20a_dbg_fn(""); + + if (!gr->map_tiles) + return -1; + + gk20a_writel(g, gr_crstr_map_table_cfg_r(), + gr_crstr_map_table_cfg_row_offset_f(gr->map_row_offset) | + gr_crstr_map_table_cfg_num_entries_f(gr->tpc_count)); + + /* 6 tpc can be stored in one map register */ + mapregs = (num_gpcs * num_tpc_per_gpc + 5) / 6; + + for (i = 0, j = 0; i < mapregs; i++, j = j + 6) { + map = gr_crstr_gpc_map_tile0_f(gr->map_tiles[j]) | + gr_crstr_gpc_map_tile1_f(gr->map_tiles[j + 1]) | + gr_crstr_gpc_map_tile2_f(gr->map_tiles[j + 2]) | + gr_crstr_gpc_map_tile3_f(gr->map_tiles[j + 3]) | + gr_crstr_gpc_map_tile4_f(gr->map_tiles[j + 4]) | + gr_crstr_gpc_map_tile5_f(gr->map_tiles[j + 5]); + + gk20a_writel(g, gr_crstr_gpc_map_r(i), map); + gk20a_writel(g, gr_ppcs_wwdx_map_gpc_map_r(i), map); + gk20a_writel(g, gr_rstr2d_gpc_map_r(i), map); + } + + gk20a_writel(g, gr_ppcs_wwdx_map_table_cfg_r(), + gr_ppcs_wwdx_map_table_cfg_row_offset_f(gr->map_row_offset) | + 
gr_ppcs_wwdx_map_table_cfg_num_entries_f(gr->tpc_count)); + + for (i = 0, j = 1; i < gr_ppcs_wwdx_map_table_cfg_coeff__size_1_v(); + i++, j = j + 4) { + gk20a_writel(g, gr_ppcs_wwdx_map_table_cfg_coeff_r(i), + gr_ppcs_wwdx_map_table_cfg_coeff_0_mod_value_f( + ((1 << j) % gr->tpc_count)) | + gr_ppcs_wwdx_map_table_cfg_coeff_1_mod_value_f( + ((1 << (j + 1)) % gr->tpc_count)) | + gr_ppcs_wwdx_map_table_cfg_coeff_2_mod_value_f( + ((1 << (j + 2)) % gr->tpc_count)) | + gr_ppcs_wwdx_map_table_cfg_coeff_3_mod_value_f( + ((1 << (j + 3)) % gr->tpc_count))); + } + + gk20a_writel(g, gr_rstr2d_map_table_cfg_r(), + gr_rstr2d_map_table_cfg_row_offset_f(gr->map_row_offset) | + gr_rstr2d_map_table_cfg_num_entries_f(gr->tpc_count)); + + return 0; +} + +static int gv11b_write_bundle_veid_state(struct gk20a *g, u32 index) +{ + struct av_list_gk20a *sw_veid_bundle_init = + &g->gr.ctx_vars.sw_veid_bundle_init; + u32 j; + u32 num_subctx, err = 0; + + num_subctx = g->fifo.t19x.max_subctx_count; + + for (j = 0; j < num_subctx; j++) { + nvgpu_log_fn(g, "write bundle_address_r for subctx: %d", j); + gk20a_writel(g, gr_pipe_bundle_address_r(), + sw_veid_bundle_init->l[index].addr | + gr_pipe_bundle_address_veid_f(j)); + + err = gr_gk20a_wait_fe_idle(g, gk20a_get_gr_idle_timeout(g), + GR_IDLE_CHECK_DEFAULT); + } + return err; +} + +int gr_gv11b_init_sw_veid_bundle(struct gk20a *g) +{ + struct av_list_gk20a *sw_veid_bundle_init = + &g->gr.ctx_vars.sw_veid_bundle_init; + u32 i; + u32 last_bundle_data = 0; + u32 err = 0; + + for (i = 0; i < sw_veid_bundle_init->count; i++) { + nvgpu_log_fn(g, "veid bundle count: %d", i); + + if (i == 0 || last_bundle_data != + sw_veid_bundle_init->l[i].value) { + gk20a_writel(g, gr_pipe_bundle_data_r(), + sw_veid_bundle_init->l[i].value); + last_bundle_data = sw_veid_bundle_init->l[i].value; + nvgpu_log_fn(g, "last_bundle_data : 0x%08x", + last_bundle_data); + } + + if (gr_pipe_bundle_address_value_v( + sw_veid_bundle_init->l[i].addr) == GR_GO_IDLE_BUNDLE) { + 
nvgpu_log_fn(g, "go idle bundle"); + gk20a_writel(g, gr_pipe_bundle_address_r(), + sw_veid_bundle_init->l[i].addr); + err |= gr_gk20a_wait_idle(g, + gk20a_get_gr_idle_timeout(g), + GR_IDLE_CHECK_DEFAULT); + } else + err = gv11b_write_bundle_veid_state(g, i); + + if (err) { + nvgpu_err(g, "failed to init sw veid bundle"); + break; + } + } + return err; +} + +void gr_gv11b_program_zcull_mapping(struct gk20a *g, u32 zcull_num_entries, + u32 *zcull_map_tiles) +{ + u32 val, i, j; + + gk20a_dbg_fn(""); + + for (i = 0, j = 0; i < (zcull_num_entries / 8); i++, j += 8) { + val = + gr_gpcs_zcull_sm_in_gpc_number_map_tile_0_f( + zcull_map_tiles[j+0]) | + gr_gpcs_zcull_sm_in_gpc_number_map_tile_1_f( + zcull_map_tiles[j+1]) | + gr_gpcs_zcull_sm_in_gpc_number_map_tile_2_f( + zcull_map_tiles[j+2]) | + gr_gpcs_zcull_sm_in_gpc_number_map_tile_3_f( + zcull_map_tiles[j+3]) | + gr_gpcs_zcull_sm_in_gpc_number_map_tile_4_f( + zcull_map_tiles[j+4]) | + gr_gpcs_zcull_sm_in_gpc_number_map_tile_5_f( + zcull_map_tiles[j+5]) | + gr_gpcs_zcull_sm_in_gpc_number_map_tile_6_f( + zcull_map_tiles[j+6]) | + gr_gpcs_zcull_sm_in_gpc_number_map_tile_7_f( + zcull_map_tiles[j+7]); + + gk20a_writel(g, gr_gpcs_zcull_sm_in_gpc_number_map_r(i), val); + } +} + +void gr_gv11b_detect_sm_arch(struct gk20a *g) +{ + u32 v = gk20a_readl(g, gr_gpc0_tpc0_sm_arch_r()); + + g->params.sm_arch_spa_version = + gr_gpc0_tpc0_sm_arch_spa_version_v(v); + g->params.sm_arch_sm_version = + gr_gpc0_tpc0_sm_arch_sm_version_v(v); + g->params.sm_arch_warp_count = + gr_gpc0_tpc0_sm_arch_warp_count_v(v); +} + +void gr_gv11b_program_sm_id_numbering(struct gk20a *g, + u32 gpc, u32 tpc, u32 smid) +{ + u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE); + u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g, + GPU_LIT_TPC_IN_GPC_STRIDE); + u32 gpc_offset = gpc_stride * gpc; + u32 tpc_offset = tpc_in_gpc_stride * tpc; + u32 global_tpc_index = g->gr.sm_to_cluster[smid].global_tpc_index; + + gk20a_writel(g, gr_gpc0_tpc0_sm_cfg_r() + 
gpc_offset + tpc_offset, + gr_gpc0_tpc0_sm_cfg_tpc_id_f(global_tpc_index)); + gk20a_writel(g, gr_gpc0_gpm_pd_sm_id_r(tpc) + gpc_offset, + gr_gpc0_gpm_pd_sm_id_id_f(global_tpc_index)); + gk20a_writel(g, gr_gpc0_tpc0_pe_cfg_smid_r() + gpc_offset + tpc_offset, + gr_gpc0_tpc0_pe_cfg_smid_value_f(global_tpc_index)); +} + +int gr_gv11b_load_smid_config(struct gk20a *g) +{ + u32 *tpc_sm_id; + u32 i, j; + u32 tpc_index, gpc_index, tpc_id; + u32 sm_per_tpc = nvgpu_get_litter_value(g, GPU_LIT_NUM_SM_PER_TPC); + int num_gpcs = nvgpu_get_litter_value(g, GPU_LIT_NUM_GPCS); + + tpc_sm_id = nvgpu_kcalloc(g, gr_cwd_sm_id__size_1_v(), sizeof(u32)); + if (!tpc_sm_id) + return -ENOMEM; + + /* Each NV_PGRAPH_PRI_CWD_GPC_TPC_ID can store 4 TPCs.*/ + for (i = 0; i <= ((g->gr.tpc_count-1) / 4); i++) { + u32 reg = 0; + u32 bit_stride = gr_cwd_gpc_tpc_id_gpc0_s() + + gr_cwd_gpc_tpc_id_tpc0_s(); + + for (j = 0; j < 4; j++) { + u32 sm_id; + u32 bits; + + tpc_id = (i << 2) + j; + sm_id = tpc_id * sm_per_tpc; + + if (sm_id >= g->gr.no_of_sm) + break; + + gpc_index = g->gr.sm_to_cluster[sm_id].gpc_index; + tpc_index = g->gr.sm_to_cluster[sm_id].tpc_index; + + bits = gr_cwd_gpc_tpc_id_gpc0_f(gpc_index) | + gr_cwd_gpc_tpc_id_tpc0_f(tpc_index); + reg |= bits << (j * bit_stride); + + tpc_sm_id[gpc_index + (num_gpcs * ((tpc_index & 4) + >> 2))] |= tpc_id << tpc_index * bit_stride; + } + gk20a_writel(g, gr_cwd_gpc_tpc_id_r(i), reg); + } + + for (i = 0; i < gr_cwd_sm_id__size_1_v(); i++) + gk20a_writel(g, gr_cwd_sm_id_r(i), tpc_sm_id[i]); + nvgpu_kfree(g, tpc_sm_id); + + return 0; +} + +int gr_gv11b_commit_inst(struct channel_gk20a *c, u64 gpu_va) +{ + u32 addr_lo; + u32 addr_hi; + struct ctx_header_desc *ctx; + int err; + + gk20a_dbg_fn(""); + + err = gv11b_alloc_subctx_header(c); + if (err) + return err; + + err = gv11b_update_subctx_header(c, gpu_va); + if (err) + return err; + + ctx = &c->ch_ctx.ctx_header; + addr_lo = u64_lo32(ctx->mem.gpu_va) >> ram_in_base_shift_v(); + addr_hi = 
u64_hi32(ctx->mem.gpu_va); + + /* point this address to engine_wfi_ptr */ + nvgpu_mem_wr32(c->g, &c->inst_block, ram_in_engine_wfi_target_w(), + ram_in_engine_cs_wfi_v() | + ram_in_engine_wfi_mode_f(ram_in_engine_wfi_mode_virtual_v()) | + ram_in_engine_wfi_ptr_lo_f(addr_lo)); + + nvgpu_mem_wr32(c->g, &c->inst_block, ram_in_engine_wfi_ptr_hi_w(), + ram_in_engine_wfi_ptr_hi_f(addr_hi)); + + return 0; +} + + + +int gr_gv11b_commit_global_timeslice(struct gk20a *g, struct channel_gk20a *c) +{ + struct channel_ctx_gk20a *ch_ctx = NULL; + u32 pd_ab_dist_cfg0; + u32 ds_debug; + u32 mpc_vtg_debug; + u32 pe_vaf; + u32 pe_vsc_vpc; + + gk20a_dbg_fn(""); + + pd_ab_dist_cfg0 = gk20a_readl(g, gr_pd_ab_dist_cfg0_r()); + ds_debug = gk20a_readl(g, gr_ds_debug_r()); + mpc_vtg_debug = gk20a_readl(g, gr_gpcs_tpcs_mpc_vtg_debug_r()); + + pe_vaf = gk20a_readl(g, gr_gpcs_tpcs_pe_vaf_r()); + pe_vsc_vpc = gk20a_readl(g, gr_gpcs_tpcs_pes_vsc_vpc_r()); + + pe_vaf = gr_gpcs_tpcs_pe_vaf_fast_mode_switch_true_f() | pe_vaf; + pe_vsc_vpc = gr_gpcs_tpcs_pes_vsc_vpc_fast_mode_switch_true_f() | + pe_vsc_vpc; + pd_ab_dist_cfg0 = gr_pd_ab_dist_cfg0_timeslice_enable_en_f() | + pd_ab_dist_cfg0; + ds_debug = gr_ds_debug_timeslice_mode_enable_f() | ds_debug; + mpc_vtg_debug = gr_gpcs_tpcs_mpc_vtg_debug_timeslice_mode_enabled_f() | + mpc_vtg_debug; + + gr_gk20a_ctx_patch_write(g, ch_ctx, gr_gpcs_tpcs_pe_vaf_r(), pe_vaf, + false); + gr_gk20a_ctx_patch_write(g, ch_ctx, gr_gpcs_tpcs_pes_vsc_vpc_r(), + pe_vsc_vpc, false); + gr_gk20a_ctx_patch_write(g, ch_ctx, gr_pd_ab_dist_cfg0_r(), + pd_ab_dist_cfg0, false); + gr_gk20a_ctx_patch_write(g, ch_ctx, gr_ds_debug_r(), ds_debug, false); + gr_gk20a_ctx_patch_write(g, ch_ctx, gr_gpcs_tpcs_mpc_vtg_debug_r(), + mpc_vtg_debug, false); + + return 0; +} + +void gr_gv11b_write_zcull_ptr(struct gk20a *g, + struct nvgpu_mem *mem, u64 gpu_va) +{ + u32 va_lo, va_hi; + + gpu_va = gpu_va >> 8; + va_lo = u64_lo32(gpu_va); + va_hi = u64_hi32(gpu_va); + nvgpu_mem_wr(g, mem, + 
ctxsw_prog_main_image_zcull_ptr_o(), va_lo);
	nvgpu_mem_wr(g, mem,
		ctxsw_prog_main_image_zcull_ptr_hi_o(), va_hi);
}


/*
 * Write the perfmon buffer pointer into the ctxsw image as a lo/hi pair.
 * Like the zcull pointer, the VA is stored at 256-byte granularity.
 */
void gr_gv11b_write_pm_ptr(struct gk20a *g,
		struct nvgpu_mem *mem, u64 gpu_va)
{
	u32 va_lo, va_hi;

	gpu_va = gpu_va >> 8;
	va_lo = u64_lo32(gpu_va);
	va_hi = u64_hi32(gpu_va);
	nvgpu_mem_wr(g, mem,
		ctxsw_prog_main_image_pm_ptr_o(), va_lo);
	nvgpu_mem_wr(g, mem,
		ctxsw_prog_main_image_pm_ptr_hi_o(), va_hi);
}

/*
 * Set the engine-level clock gating (ELCG) mode for one engine.  No-op
 * when the platform cannot do ELCG.  On an invalid mode the error is
 * logged and the unmodified gate control value is written back (harmless).
 */
void gr_gv11b_init_elcg_mode(struct gk20a *g, u32 mode, u32 engine)
{
	u32 gate_ctrl;

	if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_ELCG))
		return;

	gate_ctrl = gk20a_readl(g, therm_gate_ctrl_r(engine));

	switch (mode) {
	case ELCG_RUN:
		gate_ctrl = set_field(gate_ctrl,
				therm_gate_ctrl_eng_clk_m(),
				therm_gate_ctrl_eng_clk_run_f());
		gate_ctrl = set_field(gate_ctrl,
				therm_gate_ctrl_idle_holdoff_m(),
				therm_gate_ctrl_idle_holdoff_on_f());
		break;
	case ELCG_STOP:
		gate_ctrl = set_field(gate_ctrl,
				therm_gate_ctrl_eng_clk_m(),
				therm_gate_ctrl_eng_clk_stop_f());
		break;
	case ELCG_AUTO:
		gate_ctrl = set_field(gate_ctrl,
				therm_gate_ctrl_eng_clk_m(),
				therm_gate_ctrl_eng_clk_auto_f());
		break;
	default:
		nvgpu_err(g, "invalid elcg mode %d", mode);
	}

	gk20a_writel(g, therm_gate_ctrl_r(engine), gate_ctrl);
}

/*
 * Program the front-end TPC floorsweep mask.  A user override
 * (g->tpc_fs_mask_user) is honored only when the fuse mask shows all TPCs
 * present; otherwise the mask derived from pes_tpc_mask[] is written.
 */
void gr_gv11b_load_tpc_mask(struct gk20a *g)
{
	u32 pes_tpc_mask = 0, fuse_tpc_mask;
	u32 gpc, pes, val;
	u32 num_tpc_per_gpc = nvgpu_get_litter_value(g,
				GPU_LIT_NUM_TPC_PER_GPC);

	/* gv11b has 1 GPC and 4 TPC/GPC, so mask will not overflow u32 */
	for (gpc = 0; gpc < g->gr.gpc_count; gpc++) {
		for (pes = 0; pes < g->gr.pe_count_per_gpc; pes++) {
			pes_tpc_mask |= g->gr.pes_tpc_mask[pes][gpc] <<
				num_tpc_per_gpc * gpc;
		}
	}

	gk20a_dbg_info("pes_tpc_mask %u\n", pes_tpc_mask);
	/*
	 * NOTE(review): gpc equals g->gr.gpc_count here (the loop exit
	 * value), i.e. one past the last valid GPC index.  Presumably 0
	 * was intended on this single-GPC chip — confirm that
	 * get_gpc_tpc_mask tolerates the out-of-range index.
	 */
	fuse_tpc_mask = g->ops.gr.get_gpc_tpc_mask(g, gpc);
	if (g->tpc_fs_mask_user &&
		g->tpc_fs_mask_user != fuse_tpc_mask &&
		fuse_tpc_mask == (0x1U << g->gr.max_tpc_count) - 1U) {
		val = g->tpc_fs_mask_user;
		val &= (0x1U << g->gr.max_tpc_count) - 1U;
		/* reduce to a contiguous low mask of the same popcount */
		val = (0x1U << hweight32(val)) - 1U;
		gk20a_writel(g, gr_fe_tpc_fs_r(0), val);
	} else {
		gk20a_writel(g, gr_fe_tpc_fs_r(0), pes_tpc_mask);
	}

}

/*
 * Write the graphics full-preemption save buffer address into the ctxsw
 * image — both the main image slot and the VEID0 subcontext slot.
 */
void gr_gv11b_set_preemption_buffer_va(struct gk20a *g,
			struct nvgpu_mem *mem, u64 gpu_va)
{
	u32 addr_lo, addr_hi;

	addr_lo = u64_lo32(gpu_va);
	addr_hi = u64_hi32(gpu_va);

	nvgpu_mem_wr(g, mem,
		ctxsw_prog_main_image_full_preemption_ptr_o(), addr_lo);
	nvgpu_mem_wr(g, mem,
		ctxsw_prog_main_image_full_preemption_ptr_hi_o(), addr_hi);

	nvgpu_mem_wr(g, mem,
		ctxsw_prog_main_image_full_preemption_ptr_veid0_o(), addr_lo);
	nvgpu_mem_wr(g, mem,
		ctxsw_prog_main_image_full_preemption_ptr_veid0_hi_o(),
		addr_hi);

}

/*
 * gv11b fs-state init: set the SM texture I/O out-of-range address check
 * mode, disable RE suppression, apply any FECS ECC feature override, then
 * delegate to the common gm20b init.  Returns gr_gm20b_init_fs_state()'s
 * result.
 */
int gr_gv11b_init_fs_state(struct gk20a *g)
{
	u32 data;

	gk20a_dbg_fn("");

	data = gk20a_readl(g, gr_gpcs_tpcs_sm_texio_control_r());
	data = set_field(data, gr_gpcs_tpcs_sm_texio_control_oor_addr_check_mode_m(),
			gr_gpcs_tpcs_sm_texio_control_oor_addr_check_mode_arm_63_48_match_f());
	gk20a_writel(g, gr_gpcs_tpcs_sm_texio_control_r(), data);

	data = gk20a_readl(g, gr_gpcs_tpcs_sm_disp_ctrl_r());
	data = set_field(data, gr_gpcs_tpcs_sm_disp_ctrl_re_suppress_m(),
			gr_gpcs_tpcs_sm_disp_ctrl_re_suppress_disable_f());
	gk20a_writel(g, gr_gpcs_tpcs_sm_disp_ctrl_r(), data);

	if (g->gr.t18x.fecs_feature_override_ecc_val != 0) {
		gk20a_writel(g,
			gr_fecs_feature_override_ecc_r(),
			g->gr.t18x.fecs_feature_override_ecc_val);
	}

	return gr_gm20b_init_fs_state(g);
}

/*
 * Report which SM(s) of a TPC raised an ESR as a bitmask in *esr_sm_sel
 * (bit 0 = SM0, bit 1 = SM1).
 */
void gv11b_gr_get_esr_sm_sel(struct gk20a *g, u32 gpc, u32 tpc,
				u32 *esr_sm_sel)
{
	u32 reg_val;
	u32 offset = gk20a_gr_gpc_offset(g, gpc) + gk20a_gr_tpc_offset(g, tpc);

	reg_val = gk20a_readl(g, gr_gpc0_tpc0_sm_tpc_esr_sm_sel_r() + offset);
	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg,
			"sm tpc esr sm sel reg val: 0x%x", reg_val);
	*esr_sm_sel = 0;
	if (gr_gpc0_tpc0_sm_tpc_esr_sm_sel_sm0_error_v(reg_val))
		*esr_sm_sel = 1;
if (gr_gpc0_tpc0_sm_tpc_esr_sm_sel_sm1_error_v(reg_val))
		*esr_sm_sel |= 1 << 1;
	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg,
			"esr_sm_sel bitmask: 0x%x", *esr_sm_sel);
}

/*
 * Suspend all SMs by asserting the stop trigger via a broadcast write.
 * Always returns 0.
 */
int gv11b_gr_sm_trigger_suspend(struct gk20a *g)
{
	u32 dbgr_control0;

	/* assert stop trigger. uniformity assumption: all SMs will have
	 * the same state in dbg_control0.
	 */
	dbgr_control0 =
		gk20a_readl(g, gr_gpc0_tpc0_sm0_dbgr_control0_r());
	dbgr_control0 |= gr_gpc0_tpc0_sm0_dbgr_control0_stop_trigger_enable_f();

	/* broadcast write */
	gk20a_writel(g,
		gr_gpcs_tpcs_sms_dbgr_control0_r(), dbgr_control0);

	gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg,
			"stop trigger enable: broadcast dbgr_control0: 0x%x ",
			dbgr_control0);

	return 0;
}

/*
 * Fill w_state[] with per-SM valid/paused/trapped warp masks, each read
 * as a 64-bit value from the _1/_0 register pair.
 *
 * NOTE(review): only index [0] of each mask array is assigned here, but
 * the debug loop below also prints index [1], which this function never
 * writes — confirm that callers pre-zero w_state.
 */
void gv11b_gr_bpt_reg_info(struct gk20a *g, struct nvgpu_warpstate *w_state)
{
	/* Check if we have at least one valid warp
	 * get paused state on maxwell
	 */
	struct gr_gk20a *gr = &g->gr;
	u32 gpc, tpc, sm, sm_id;
	u32 offset;
	u64 warps_valid = 0, warps_paused = 0, warps_trapped = 0;

	for (sm_id = 0; sm_id < gr->no_of_sm; sm_id++) {
		gpc = g->gr.sm_to_cluster[sm_id].gpc_index;
		tpc = g->gr.sm_to_cluster[sm_id].tpc_index;
		sm = g->gr.sm_to_cluster[sm_id].sm_index;

		offset = gk20a_gr_gpc_offset(g, gpc) +
			 gk20a_gr_tpc_offset(g, tpc) +
			 gv11b_gr_sm_offset(g, sm);

		/* 64 bit read */
		warps_valid = (u64)gk20a_readl(g,
				gr_gpc0_tpc0_sm0_warp_valid_mask_1_r() +
				offset) << 32;
		warps_valid |= gk20a_readl(g,
				gr_gpc0_tpc0_sm0_warp_valid_mask_0_r() +
				offset);

		/* 64 bit read */
		warps_paused = (u64)gk20a_readl(g,
				gr_gpc0_tpc0_sm0_dbgr_bpt_pause_mask_1_r() +
				offset) << 32;
		warps_paused |= gk20a_readl(g,
				gr_gpc0_tpc0_sm0_dbgr_bpt_pause_mask_0_r() +
				offset);

		/* 64 bit read */
		warps_trapped = (u64)gk20a_readl(g,
				gr_gpc0_tpc0_sm0_dbgr_bpt_trap_mask_1_r() +
				offset) << 32;
		warps_trapped |= gk20a_readl(g,
				gr_gpc0_tpc0_sm0_dbgr_bpt_trap_mask_0_r() +
				offset);

		w_state[sm_id].valid_warps[0] = warps_valid;
		w_state[sm_id].trapped_warps[0] = warps_trapped;
		w_state[sm_id].paused_warps[0] = warps_paused;
	}


	/* Only for debug purpose */
	for (sm_id = 0; sm_id < gr->no_of_sm; sm_id++) {
		gk20a_dbg_fn("w_state[%d].valid_warps[0]: %llx\n",
			sm_id, w_state[sm_id].valid_warps[0]);
		gk20a_dbg_fn("w_state[%d].valid_warps[1]: %llx\n",
			sm_id, w_state[sm_id].valid_warps[1]);

		gk20a_dbg_fn("w_state[%d].trapped_warps[0]: %llx\n",
			sm_id, w_state[sm_id].trapped_warps[0]);
		gk20a_dbg_fn("w_state[%d].trapped_warps[1]: %llx\n",
			sm_id, w_state[sm_id].trapped_warps[1]);

		gk20a_dbg_fn("w_state[%d].paused_warps[0]: %llx\n",
			sm_id, w_state[sm_id].paused_warps[0]);
		gk20a_dbg_fn("w_state[%d].paused_warps[1]: %llx\n",
			sm_id, w_state[sm_id].paused_warps[1]);
	}
}

/*
 * Cache a caller-supplied SM error state and push it to hardware: direct
 * register writes when the channel's context is resident, ctx patch
 * writes (report masks only) otherwise.  Ctxsw is stopped for the update
 * and re-enabled afterwards; dbg_sessions_lock is held throughout.
 * Returns 0 on success or the first ctxsw/patch error.
 */
int gv11b_gr_update_sm_error_state(struct gk20a *g,
		struct channel_gk20a *ch, u32 sm_id,
		struct nvgpu_gr_sm_error_state *sm_error_state)
{
	u32 gpc, tpc, sm, offset;
	struct gr_gk20a *gr = &g->gr;
	struct channel_ctx_gk20a *ch_ctx = &ch->ch_ctx;
	int err = 0;

	nvgpu_mutex_acquire(&g->dbg_sessions_lock);

	gr->sm_error_states[sm_id].hww_global_esr =
			sm_error_state->hww_global_esr;
	gr->sm_error_states[sm_id].hww_warp_esr =
			sm_error_state->hww_warp_esr;
	gr->sm_error_states[sm_id].hww_warp_esr_pc =
			sm_error_state->hww_warp_esr_pc;
	gr->sm_error_states[sm_id].hww_global_esr_report_mask =
			sm_error_state->hww_global_esr_report_mask;
	gr->sm_error_states[sm_id].hww_warp_esr_report_mask =
			sm_error_state->hww_warp_esr_report_mask;

	err = gr_gk20a_disable_ctxsw(g);
	if (err) {
		nvgpu_err(g, "unable to stop gr ctxsw");
		goto fail;
	}

	gpc = g->gr.sm_to_cluster[sm_id].gpc_index;
	tpc = g->gr.sm_to_cluster[sm_id].tpc_index;
	sm = g->gr.sm_to_cluster[sm_id].sm_index;

	offset = gk20a_gr_gpc_offset(g, gpc) +
			gk20a_gr_tpc_offset(g, tpc) +
			gv11b_gr_sm_offset(g, sm);

	if (gk20a_is_channel_ctx_resident(ch)) {
		gk20a_writel(g,
			gr_gpc0_tpc0_sm0_hww_global_esr_r() + offset,
			gr->sm_error_states[sm_id].hww_global_esr);
		gk20a_writel(g,
			gr_gpc0_tpc0_sm0_hww_warp_esr_r() + offset,
			gr->sm_error_states[sm_id].hww_warp_esr);
		gk20a_writel(g,
			gr_gpc0_tpc0_sm0_hww_warp_esr_pc_r() + offset,
			gr->sm_error_states[sm_id].hww_warp_esr_pc);
		gk20a_writel(g,
			gr_gpc0_tpc0_sm0_hww_global_esr_report_mask_r() + offset,
			gr->sm_error_states[sm_id].hww_global_esr_report_mask);
		gk20a_writel(g,
			gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_r() + offset,
			gr->sm_error_states[sm_id].hww_warp_esr_report_mask);
	} else {
		err = gr_gk20a_ctx_patch_write_begin(g, ch_ctx, false);
		if (err)
			goto enable_ctxsw;

		gr_gk20a_ctx_patch_write(g, ch_ctx,
			gr_gpcs_tpcs_sms_hww_global_esr_report_mask_r() +
			offset,
			gr->sm_error_states[sm_id].hww_global_esr_report_mask,
			true);
		gr_gk20a_ctx_patch_write(g, ch_ctx,
			gr_gpcs_tpcs_sms_hww_warp_esr_report_mask_r() +
			offset,
			gr->sm_error_states[sm_id].hww_warp_esr_report_mask,
			true);

		gr_gk20a_ctx_patch_write_end(g, ch_ctx, false);
	}

enable_ctxsw:
	err = gr_gk20a_enable_ctxsw(g);

fail:
	nvgpu_mutex_release(&g->dbg_sessions_lock);
	return err;
}

/*
 * Toggle SM debugger mode for the SMs selected by the "sms" bitmask,
 * using the regops context interface so non-resident channels work too.
 *
 * NOTE(review): "1 << sm_id" is an int-width shift tested against the
 * u64 sms mask; for sm_id >= 31 this misbehaves — presumably should be
 * 1ULL << sm_id.  Confirm no_of_sm can exceed 31 on any chip using this.
 */
int gv11b_gr_set_sm_debug_mode(struct gk20a *g,
	struct channel_gk20a *ch, u64 sms, bool enable)
{
	struct nvgpu_dbg_gpu_reg_op *ops;
	unsigned int i = 0, sm_id;
	int err;

	ops = nvgpu_kcalloc(g, g->gr.no_of_sm, sizeof(*ops));
	if (!ops)
		return -ENOMEM;
	for (sm_id = 0; sm_id < g->gr.no_of_sm; sm_id++) {
		u32 gpc, tpc, sm;
		u32 reg_offset, reg_mask, reg_val;

		if (!(sms & (1 << sm_id)))
			continue;

		gpc = g->gr.sm_to_cluster[sm_id].gpc_index;
		tpc = g->gr.sm_to_cluster[sm_id].tpc_index;
		sm = g->gr.sm_to_cluster[sm_id].sm_index;

		reg_offset = gk20a_gr_gpc_offset(g, gpc) +
				gk20a_gr_tpc_offset(g, tpc) +
				gv11b_gr_sm_offset(g, sm);

		ops[i].op = REGOP(WRITE_32);
		ops[i].type = REGOP(TYPE_GR_CTX);
		ops[i].offset = gr_gpc0_tpc0_sm0_dbgr_control0_r() + reg_offset;
reg_mask = 0;
		reg_val = 0;
		if (enable) {
			nvgpu_log(g, gpu_dbg_gpu_dbg,
				"SM:%d debuggger mode ON", sm);
			reg_mask |=
			 gr_gpc0_tpc0_sm0_dbgr_control0_debugger_mode_m();
			reg_val |=
			 gr_gpc0_tpc0_sm0_dbgr_control0_debugger_mode_on_f();
		} else {
			nvgpu_log(g, gpu_dbg_gpu_dbg,
				"SM:%d debuggger mode Off", sm);
			reg_mask |=
			 gr_gpc0_tpc0_sm0_dbgr_control0_debugger_mode_m();
			reg_val |=
			 gr_gpc0_tpc0_sm0_dbgr_control0_debugger_mode_off_f();
		}

		ops[i].and_n_mask_lo = reg_mask;
		ops[i].value_lo = reg_val;
		i++;
	}

	/* execute only the ops actually populated (i of them) */
	err = gr_gk20a_exec_ctx_ops(ch, ops, i, i, 0);
	if (err)
		nvgpu_err(g, "Failed to access register\n");
	nvgpu_kfree(g, ops);
	return err;
}

/*
 * Snapshot the faulting SM's HWW global/warp ESR state into
 * gr->sm_error_states.  The SM id is read back from the TPC's sm_cfg
 * tpc_id field; the SM slot within the TPC is sm_id % sm_per_tpc.
 * Always returns 0; dbg_sessions_lock is held for the snapshot.
 */
int gv11b_gr_record_sm_error_state(struct gk20a *g, u32 gpc, u32 tpc)
{
	int sm_id;
	struct gr_gk20a *gr = &g->gr;
	u32 offset, sm, sm_per_tpc;
	u32 gpc_tpc_offset;

	nvgpu_mutex_acquire(&g->dbg_sessions_lock);

	sm_per_tpc = nvgpu_get_litter_value(g, GPU_LIT_NUM_SM_PER_TPC);
	gpc_tpc_offset = gk20a_gr_gpc_offset(g, gpc) +
			 gk20a_gr_tpc_offset(g, tpc);

	sm_id = gr_gpc0_tpc0_sm_cfg_tpc_id_v(gk20a_readl(g,
			gr_gpc0_tpc0_sm_cfg_r() + gpc_tpc_offset));

	sm = sm_id % sm_per_tpc;

	offset = gpc_tpc_offset + gv11b_gr_sm_offset(g, sm);

	gr->sm_error_states[sm_id].hww_global_esr = gk20a_readl(g,
		gr_gpc0_tpc0_sm0_hww_global_esr_r() + offset);

	gr->sm_error_states[sm_id].hww_warp_esr = gk20a_readl(g,
		gr_gpc0_tpc0_sm0_hww_warp_esr_r() + offset);

	gr->sm_error_states[sm_id].hww_warp_esr_pc = gk20a_readl(g,
		gr_gpc0_tpc0_sm0_hww_warp_esr_pc_r() + offset);

	gr->sm_error_states[sm_id].hww_global_esr_report_mask = gk20a_readl(g,
		gr_gpc0_tpc0_sm0_hww_global_esr_report_mask_r() + offset);

	gr->sm_error_states[sm_id].hww_warp_esr_report_mask = gk20a_readl(g,
		gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_r() + offset);

	nvgpu_mutex_release(&g->dbg_sessions_lock);

	return 0;
}

/*
 * Clear latched SM HWW state, then program which warp and global errors
 * get reported.
 *
 * NOTE(review): the "clear hww" write to the global ESR register is
 * issued twice with the same value; the second write was possibly meant
 * for the warp ESR register — confirm against other chips' versions of
 * this function.
 */
void gv11b_gr_set_hww_esr_report_mask(struct gk20a *g)
{

	/* clear hww */
	gk20a_writel(g, gr_gpcs_tpcs_sms_hww_global_esr_r(), 0xffffffff);
	gk20a_writel(g, gr_gpcs_tpcs_sms_hww_global_esr_r(), 0xffffffff);

	/* setup sm warp esr report masks */
	gk20a_writel(g, gr_gpcs_tpcs_sms_hww_warp_esr_report_mask_r(),
		 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_stack_error_report_f() |
		 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_api_stack_error_report_f() |
		 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_pc_wrap_report_f() |
		 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_misaligned_pc_report_f() |
		 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_pc_overflow_report_f() |
		 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_misaligned_reg_report_f() |
		 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_illegal_instr_encoding_report_f() |
		 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_illegal_instr_param_report_f() |
		 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_oor_reg_report_f() |
		 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_oor_addr_report_f() |
		 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_misaligned_addr_report_f() |
		 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_invalid_addr_space_report_f() |
		 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_invalid_const_addr_ldc_report_f() |
		 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_stack_overflow_report_f() |
		 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_mmu_fault_report_f());

	/* setup sm global esr report mask. vat_alarm_report is not enabled */
	gk20a_writel(g, gr_gpcs_tpcs_sms_hww_global_esr_report_mask_r(),
		 gr_gpc0_tpc0_sm0_hww_global_esr_report_mask_multiple_warp_errors_report_f());
}

/*
 * Return true when SM debugger mode is enabled, judged from SM0's
 * unicast dbgr_control0 register.
 */
bool gv11b_gr_sm_debugger_attached(struct gk20a *g)
{
	u32 debugger_mode;
	u32 dbgr_control0 = gk20a_readl(g, gr_gpc0_tpc0_sm0_dbgr_control0_r());

	/* check if sm debugger is attached.
	 * assumption: all SMs will have debug mode enabled/disabled
	 * uniformly.
	 */
	debugger_mode =
		gr_gpc0_tpc0_sm0_dbgr_control0_debugger_mode_v(dbgr_control0);
	gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg,
			"SM Debugger Mode: %d", debugger_mode);
	if (debugger_mode ==
			gr_gpc0_tpc0_sm0_dbgr_control0_debugger_mode_on_v())
		return true;

	return false;
}

/*
 * Suspend one SM: assert its stop trigger and wait for it to lock down.
 * Skipped (with an error log) when no SM debugger is attached.
 */
void gv11b_gr_suspend_single_sm(struct gk20a *g,
		u32 gpc, u32 tpc, u32 sm,
		u32 global_esr_mask, bool check_errors)
{
	int err;
	u32 dbgr_control0;
	u32 offset = gk20a_gr_gpc_offset(g, gpc) +
			gk20a_gr_tpc_offset(g, tpc) +
			gv11b_gr_sm_offset(g, sm);

	/* if an SM debugger isn't attached, skip suspend */
	if (!g->ops.gr.sm_debugger_attached(g)) {
		nvgpu_err(g,
			"SM debugger not attached, skipping suspend!");
		return;
	}

	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
		"suspending gpc:%d, tpc:%d, sm%d", gpc, tpc, sm);

	/* assert stop trigger. */
	dbgr_control0 = gk20a_readl(g,
			gr_gpc0_tpc0_sm0_dbgr_control0_r() + offset);
	dbgr_control0 |= gr_gpc0_tpc0_sm0_dbgr_control0_stop_trigger_enable_f();
	gk20a_writel(g, gr_gpc0_tpc0_sm0_dbgr_control0_r() + offset,
			dbgr_control0);

	err = g->ops.gr.wait_for_sm_lock_down(g, gpc, tpc, sm,
			global_esr_mask, check_errors);
	if (err) {
		nvgpu_err(g,
			"SuspendSm failed");
		return;
	}
}

/*
 * Suspend every SM: broadcast the stop trigger, then wait for each SM in
 * turn to lock down.  Skipped when no SM debugger is attached; bails out
 * on the first SM that fails to lock down.
 */
void gv11b_gr_suspend_all_sms(struct gk20a *g,
		u32 global_esr_mask, bool check_errors)
{
	struct gr_gk20a *gr = &g->gr;
	u32 gpc, tpc, sm;
	int err;
	u32 dbgr_control0;
	u32 sm_per_tpc = nvgpu_get_litter_value(g, GPU_LIT_NUM_SM_PER_TPC);

	/* if an SM debugger isn't attached, skip suspend */
	if (!g->ops.gr.sm_debugger_attached(g)) {
		nvgpu_err(g,
			"SM debugger not attached, skipping suspend!");
		return;
	}

	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "suspending all sms");

	/* assert stop trigger. uniformity assumption: all SMs will have
	 * the same state in dbg_control0.
	 */
	dbgr_control0 =
		gk20a_readl(g, gr_gpc0_tpc0_sm0_dbgr_control0_r());
	dbgr_control0 |= gr_gpc0_tpc0_sm0_dbgr_control0_stop_trigger_enable_f();

	/* broadcast write */
	gk20a_writel(g,
		gr_gpcs_tpcs_sms_dbgr_control0_r(), dbgr_control0);

	for (gpc = 0; gpc < gr->gpc_count; gpc++) {
		for (tpc = 0; tpc < gr_gk20a_get_tpc_count(gr, gpc); tpc++) {
			for (sm = 0; sm < sm_per_tpc; sm++) {
				err = g->ops.gr.wait_for_sm_lock_down(g,
					gpc, tpc, sm,
					global_esr_mask, check_errors);
				if (err) {
					nvgpu_err(g,
						"SuspendAllSms failed");
					return;
				}
			}
		}
	}
}

/*
 * Resume one suspended SM: first clear its stop trigger (as a separate
 * write, per arch advice below), then set the run trigger.
 */
void gv11b_gr_resume_single_sm(struct gk20a *g,
		u32 gpc, u32 tpc, u32 sm)
{
	u32 dbgr_control0, dbgr_status0;
	u32 offset;
	/*
	 * The following requires some clarification. Despite the fact that both
	 * RUN_TRIGGER and STOP_TRIGGER have the word "TRIGGER" in their
	 * names, only one is actually a trigger, and that is the STOP_TRIGGER.
	 * Merely writing a 1(_TASK) to the RUN_TRIGGER is not sufficient to
	 * resume the gpu - the _STOP_TRIGGER must explicitly be set to 0
	 * (_DISABLE) as well.

	 * Advice from the arch group: Disable the stop trigger first, as a
	 * separate operation, in order to ensure that the trigger has taken
	 * effect, before enabling the run trigger.
	 */

	offset = gk20a_gr_gpc_offset(g, gpc) + gk20a_gr_tpc_offset(g, tpc) +
			gv11b_gr_sm_offset(g, sm);

	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
		"resuming gpc:%d, tpc:%d, sm%d", gpc, tpc, sm);
	dbgr_control0 = gk20a_readl(g,
			gr_gpc0_tpc0_sm0_dbgr_control0_r() + offset);
	dbgr_status0 = gk20a_readl(g,
			gr_gpc0_tpc0_sm0_dbgr_status0_r() + offset);

	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
			"before stop trigger disable: "
			"dbgr_control0 = 0x%x dbgr_status0: 0x%x",
			dbgr_control0, dbgr_status0);

	/*De-assert stop trigger */
	dbgr_control0 = set_field(dbgr_control0,
			gr_gpc0_tpc0_sm0_dbgr_control0_stop_trigger_m(),
			gr_gpc0_tpc0_sm0_dbgr_control0_stop_trigger_disable_f());
	gk20a_writel(g, gr_gpc0_tpc0_sm0_dbgr_control0_r() +
				 offset, dbgr_control0);

	dbgr_control0 = gk20a_readl(g,
			gr_gpc0_tpc0_sm0_dbgr_control0_r() + offset);
	dbgr_status0 = gk20a_readl(g,
			gr_gpc0_tpc0_sm0_dbgr_status0_r() + offset);

	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
			"before run trigger: "
			"dbgr_control0 = 0x%x dbgr_status0: 0x%x",
			dbgr_control0, dbgr_status0);
	/* Run trigger */
	dbgr_control0 |=
		gr_gpc0_tpc0_sm0_dbgr_control0_run_trigger_task_f();
	gk20a_writel(g,
		gr_gpc0_tpc0_sm0_dbgr_control0_r() +
		offset, dbgr_control0);

	dbgr_control0 = gk20a_readl(g,
			gr_gpc0_tpc0_sm0_dbgr_control0_r() + offset);
	dbgr_status0 = gk20a_readl(g,
			gr_gpc0_tpc0_sm0_dbgr_status0_r() + offset);
	/* run trigger is not sticky bit. SM clears it immediately */
	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
			"after run trigger: "
			"dbgr_control0 = 0x%x dbgr_status0: 0x%x",
			dbgr_control0, dbgr_status0);

}

/*
 * Resume all suspended SMs.  Same two-step sequence as the single-SM
 * variant, but reads come from the SM0 unicast registers (SMs assumed
 * uniform) and writes go to the broadcast registers.
 */
void gv11b_gr_resume_all_sms(struct gk20a *g)
{
	u32 dbgr_control0, dbgr_status0;
	/*
	 * The following requires some clarification. Despite the fact that both
	 * RUN_TRIGGER and STOP_TRIGGER have the word "TRIGGER" in their
	 * names, only one is actually a trigger, and that is the STOP_TRIGGER.
	 * Merely writing a 1(_TASK) to the RUN_TRIGGER is not sufficient to
	 * resume the gpu - the _STOP_TRIGGER must explicitly be set to 0
	 * (_DISABLE) as well.

	 * Advice from the arch group: Disable the stop trigger first, as a
	 * separate operation, in order to ensure that the trigger has taken
	 * effect, before enabling the run trigger.
	 */

	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "resuming all sms");

	/* Read from unicast registers */
	dbgr_control0 =
		gk20a_readl(g, gr_gpc0_tpc0_sm0_dbgr_control0_r());
	dbgr_status0 =
		gk20a_readl(g, gr_gpc0_tpc0_sm0_dbgr_status0_r());

	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
			"before stop trigger disable: "
			"dbgr_control0 = 0x%x dbgr_status0: 0x%x",
			dbgr_control0, dbgr_status0);

	dbgr_control0 = set_field(dbgr_control0,
			gr_gpc0_tpc0_sm0_dbgr_control0_stop_trigger_m(),
			gr_gpc0_tpc0_sm0_dbgr_control0_stop_trigger_disable_f());
	/* Write to broadcast registers */
	gk20a_writel(g,
		gr_gpcs_tpcs_sms_dbgr_control0_r(), dbgr_control0);

	/* Read from unicast registers */
	dbgr_control0 =
		gk20a_readl(g, gr_gpc0_tpc0_sm0_dbgr_control0_r());
	dbgr_status0 =
		gk20a_readl(g, gr_gpc0_tpc0_sm0_dbgr_status0_r());

	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
			"before run trigger: "
			"dbgr_control0 = 0x%x dbgr_status0: 0x%x",
			dbgr_control0, dbgr_status0);
	/* Run trigger */
	dbgr_control0 |=
		gr_gpc0_tpc0_sm0_dbgr_control0_run_trigger_task_f();
	/* Write to broadcast registers */
	gk20a_writel(g,
		gr_gpcs_tpcs_sms_dbgr_control0_r(), dbgr_control0);

	/* Read from unicast registers */
	dbgr_control0 =
		gk20a_readl(g, gr_gpc0_tpc0_sm0_dbgr_control0_r());
	dbgr_status0 =
		gk20a_readl(g, gr_gpc0_tpc0_sm0_dbgr_status0_r());
	/* run trigger is not sticky bit. SM clears it immediately */
	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg,
			"after run trigger: "
			"dbgr_control0 = 0x%x dbgr_status0: 0x%x",
			dbgr_control0, dbgr_status0);
}

/*
 * Resume from a breakpoint pause: clear the broadcast bpt pause mask,
 * re-enable forwarding of SM exceptions (value sampled from the gpc0/tpc0
 * unicast register, written broadcast), then resume all SMs.
 * Always returns 0.
 */
int gv11b_gr_resume_from_pause(struct gk20a *g)
{
	int err = 0;
	u32 reg_val;

	/* Clear the pause mask to tell the GPU we want to resume everyone */
	gk20a_writel(g, gr_gpcs_tpcs_sms_dbgr_bpt_pause_mask_0_r(), 0);

	/* explicitly re-enable forwarding of SM interrupts upon any resume */
	reg_val = gk20a_readl(g, gr_gpc0_tpc0_tpccs_tpc_exception_en_r());
	reg_val |= gr_gpc0_tpc0_tpccs_tpc_exception_en_sm_enabled_f();

	gk20a_writel(g, gr_gpcs_tpcs_tpccs_tpc_exception_en_r(), reg_val);

	g->ops.gr.resume_all_sms(g);

	return err;
}

/* Read the HWW warp ESR register of one SM. */
u32 gv11b_gr_get_sm_hww_warp_esr(struct gk20a *g,
			u32 gpc, u32 tpc, u32 sm)
{
	u32 offset = gk20a_gr_gpc_offset(g, gpc) +
			 gk20a_gr_tpc_offset(g, tpc) +
			 gv11b_gr_sm_offset(g, sm);

	u32 hww_warp_esr = gk20a_readl(g,
			 gr_gpc0_tpc0_sm0_hww_warp_esr_r() + offset);
	return hww_warp_esr;
}

/* Read the HWW global ESR register of one SM. */
u32 gv11b_gr_get_sm_hww_global_esr(struct gk20a *g,
			u32 gpc, u32 tpc, u32 sm)
{
	u32 offset = gk20a_gr_gpc_offset(g, gpc) +
			 gk20a_gr_tpc_offset(g, tpc) +
			 gv11b_gr_sm_offset(g, sm);

	u32 hww_global_esr = gk20a_readl(g,
				 gr_gpc0_tpc0_sm0_hww_global_esr_r() + offset);

	return hww_global_esr;
}

/*
 * Mask of global ESR bits that do NOT require locking down the SM.
 */
u32 gv11b_gr_get_sm_no_lock_down_hww_global_esr_mask(struct gk20a *g)
{
	/*
	 * These three interrupts don't require locking down the SM. They can
	 * be handled by usermode clients as they aren't fatal. Additionally,
	 * usermode clients may wish to allow some warps to execute while others
	 * are at breakpoints, as opposed to fatal errors where all warps should
	 * halt.
	 */
	u32 global_esr_mask =
		gr_gpc0_tpc0_sm0_hww_global_esr_bpt_int_pending_f() |
		gr_gpc0_tpc0_sm0_hww_global_esr_bpt_pause_pending_f() |
		gr_gpc0_tpc0_sm0_hww_global_esr_single_step_complete_pending_f();

	return global_esr_mask;
}

/*
 * Dump one SM's debugger status/control plus its 64-bit valid, bpt-pause
 * and bpt-trap warp masks.  "timeout" selects error-level vs debug-level
 * logging.
 */
static void gv11b_gr_sm_dump_warp_bpt_pause_trap_mask_regs(struct gk20a *g,
					u32 offset, bool timeout)
{
	u64 warps_valid = 0, warps_paused = 0, warps_trapped = 0;
	u32 dbgr_control0 = gk20a_readl(g,
				gr_gpc0_tpc0_sm0_dbgr_control0_r() + offset);
	u32 dbgr_status0 = gk20a_readl(g,
				gr_gpc0_tpc0_sm0_dbgr_status0_r() + offset);
	/* 64 bit read */
	warps_valid =
		(u64)gk20a_readl(g, gr_gpc0_tpc0_sm0_warp_valid_mask_1_r() +
				offset) << 32;
	warps_valid |= gk20a_readl(g,
			gr_gpc0_tpc0_sm0_warp_valid_mask_0_r() + offset);

	/* 64 bit read */
	warps_paused =
		(u64)gk20a_readl(g, gr_gpc0_tpc0_sm0_dbgr_bpt_pause_mask_1_r() +
				offset) << 32;
	warps_paused |= gk20a_readl(g,
			gr_gpc0_tpc0_sm0_dbgr_bpt_pause_mask_0_r() + offset);

	/* 64 bit read */
	warps_trapped =
		(u64)gk20a_readl(g, gr_gpc0_tpc0_sm0_dbgr_bpt_trap_mask_1_r() +
				offset) << 32;
	warps_trapped |= gk20a_readl(g,
			gr_gpc0_tpc0_sm0_dbgr_bpt_trap_mask_0_r() + offset);
	if (timeout)
		nvgpu_err(g,
			"STATUS0=0x%x CONTROL0=0x%x VALID_MASK=0x%llx "
			"PAUSE_MASK=0x%llx TRAP_MASK=0x%llx\n",
			dbgr_status0, dbgr_control0, warps_valid,
			warps_paused, warps_trapped);
	else
		gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg,
			"STATUS0=0x%x CONTROL0=0x%x VALID_MASK=0x%llx "
			"PAUSE_MASK=0x%llx TRAP_MASK=0x%llx\n",
			dbgr_status0, dbgr_control0, warps_valid,
			warps_paused, warps_trapped);
}

/*
 * Poll one SM until it reports locked down (or, when check_errors is set,
 * until no reportable error remains pending).  Uses exponential backoff
 * up to GR_IDLE_CHECK_MAX within a CPU timer.
 *
 * Returns 0 once locked down / no error pending, -EFAULT if an MMU fault
 * is pending while MMU debug mode is off (the SM would never lock down),
 * or -ETIMEDOUT on expiry (with a diagnostic register dump).
 */
int gv11b_gr_wait_for_sm_lock_down(struct gk20a *g,
		u32 gpc, u32 tpc, u32 sm,
		u32 global_esr_mask, bool check_errors)
{
	bool locked_down;
	bool no_error_pending;
	u32 delay = GR_IDLE_CHECK_DEFAULT;
	bool mmu_debug_mode_enabled = g->ops.fb.is_debug_mode_enabled(g);
	u32 dbgr_status0 = 0;
	u32 warp_esr, global_esr;
	struct nvgpu_timeout timeout;
	u32 offset = gk20a_gr_gpc_offset(g, gpc) +
			gk20a_gr_tpc_offset(g, tpc) +
			gv11b_gr_sm_offset(g, sm);

	gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg,
		"GPC%d TPC%d: locking down SM%d", gpc, tpc, sm);

	nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g),
			   NVGPU_TIMER_CPU_TIMER);

	/* wait for the sm to lock down */
	do {
		global_esr = g->ops.gr.get_sm_hww_global_esr(g, gpc, tpc, sm);
		dbgr_status0 = gk20a_readl(g,
				gr_gpc0_tpc0_sm0_dbgr_status0_r() + offset);

		warp_esr = g->ops.gr.get_sm_hww_warp_esr(g, gpc, tpc, sm);

		locked_down =
		    (gr_gpc0_tpc0_sm0_dbgr_status0_locked_down_v(dbgr_status0) ==
		     gr_gpc0_tpc0_sm0_dbgr_status0_locked_down_true_v());
		no_error_pending =
			check_errors &&
			(gr_gpc0_tpc0_sm0_hww_warp_esr_error_v(warp_esr) ==
			 gr_gpc0_tpc0_sm0_hww_warp_esr_error_none_v()) &&
			((global_esr & global_esr_mask) == 0);

		if (locked_down) {
		/*
		 * if SM reports locked down, it means that SM is idle and
		 * trapped and also that one of the these conditions are true
		 * 1) sm is nonempty and all valid warps are paused
		 * 2) sm is empty and held in trapped state due to stop trigger
		 * 3) sm is nonempty and some warps are not paused, but are
		 * instead held at RTT due to an "active" stop trigger
		 * Check for Paused warp mask != Valid
		 * warp mask after SM reports it is locked down in order to
		 * distinguish case 1 from case 3. When case 3 is detected,
		 * it implies a misprogrammed trap handler code, as all warps
		 * in the handler must promise to BPT.PAUSE instead of RTT
		 * whenever SR64 read in trap mode indicates stop trigger
		 * is asserted.
		 */
			gv11b_gr_sm_dump_warp_bpt_pause_trap_mask_regs(g,
					offset, false);
		}

		if (locked_down || no_error_pending) {
			gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg,
				"GPC%d TPC%d: locked down SM%d", gpc, tpc, sm);
			return 0;
		}

		/* if an mmu fault is pending and mmu debug mode is not
		 * enabled, the sm will never lock down.
		 */
		if (!mmu_debug_mode_enabled &&
		     (g->ops.mm.mmu_fault_pending(g))) {
			nvgpu_err(g,
				"GPC%d TPC%d: mmu fault pending,"
				" SM%d will never lock down!", gpc, tpc, sm);
			return -EFAULT;
		}

		nvgpu_usleep_range(delay, delay * 2);
		delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX);
	} while (!nvgpu_timeout_expired(&timeout));

	nvgpu_err(g, "GPC%d TPC%d: timed out while trying to "
			"lock down SM%d", gpc, tpc, sm);
	gv11b_gr_sm_dump_warp_bpt_pause_trap_mask_regs(g, offset, true);

	return -ETIMEDOUT;
}

/*
 * Lock down one SM: assert its stop trigger, then delegate to
 * wait_for_sm_lock_down and return its result.
 */
int gv11b_gr_lock_down_sm(struct gk20a *g,
			 u32 gpc, u32 tpc, u32 sm, u32 global_esr_mask,
			 bool check_errors)
{
	u32 dbgr_control0;
	u32 offset = gk20a_gr_gpc_offset(g, gpc) + gk20a_gr_tpc_offset(g, tpc) +
			gv11b_gr_sm_offset(g, sm);

	gk20a_dbg(gpu_dbg_intr | gpu_dbg_gpu_dbg,
			"GPC%d TPC%d SM%d: assert stop trigger", gpc, tpc, sm);

	/* assert stop trigger */
	dbgr_control0 =
		gk20a_readl(g, gr_gpc0_tpc0_sm0_dbgr_control0_r() + offset);
	dbgr_control0 |= gr_gpc0_tpc0_sm0_dbgr_control0_stop_trigger_enable_f();
	gk20a_writel(g,
		gr_gpc0_tpc0_sm0_dbgr_control0_r() + offset, dbgr_control0);

	return g->ops.gr.wait_for_sm_lock_down(g, gpc, tpc, sm, global_esr_mask,
			check_errors);
}

/*
 * Acknowledge SM HWW: write back the given global ESR bits to clear them
 * and zero the warp ESR, logging the post-clear values.
 */
void gv11b_gr_clear_sm_hww(struct gk20a *g, u32 gpc, u32 tpc, u32 sm,
				u32 global_esr)
{
	u32 offset = gk20a_gr_gpc_offset(g, gpc) + gk20a_gr_tpc_offset(g, tpc) +
			gv11b_gr_sm_offset(g, sm);

	gk20a_writel(g, gr_gpc0_tpc0_sm0_hww_global_esr_r() + offset,
			global_esr);
	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg,
			"Cleared HWW global esr, current reg val: 0x%x",
			gk20a_readl(g, gr_gpc0_tpc0_sm0_hww_global_esr_r() +
						offset));

	gk20a_writel(g, gr_gpc0_tpc0_sm0_hww_warp_esr_r() + offset, 0);
	gk20a_dbg(gpu_dbg_fn | gpu_dbg_gpu_dbg,
			"Cleared HWW warp esr, current reg val: 0x%x",
			gk20a_readl(g, gr_gpc0_tpc0_sm0_hww_warp_esr_r() +
						offset));
}

/*
 * Handle a TPC MPC exception: log the ESR and its VEID info, then reset
 * the MPC ESR.  Returns 0 whether or not an MPC exception was pending;
 * *post_event is never written here.  Note "esr" is reused for both the
 * esr and esr_info register reads.
 */
int gr_gv11b_handle_tpc_mpc_exception(struct gk20a *g,
					u32 gpc, u32 tpc, bool *post_event)
{
	u32 esr;
	u32 offset = gk20a_gr_gpc_offset(g, gpc) + gk20a_gr_tpc_offset(g, tpc);
	u32 tpc_exception = gk20a_readl(g, gr_gpc0_tpc0_tpccs_tpc_exception_r()
				+ offset);

	if (!(tpc_exception & gr_gpc0_tpc0_tpccs_tpc_exception_mpc_m()))
		return 0;

	nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
			"GPC%d TPC%d MPC exception", gpc, tpc);

	esr = gk20a_readl(g, gr_gpc0_tpc0_mpc_hww_esr_r() + offset);
	nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg, "mpc hww esr 0x%08x", esr);

	esr = gk20a_readl(g, gr_gpc0_tpc0_mpc_hww_esr_info_r() + offset);
	nvgpu_log(g, gpu_dbg_intr | gpu_dbg_gpu_dbg,
			"mpc hww esr info: veid 0x%08x",
			gr_gpc0_tpc0_mpc_hww_esr_info_veid_v(esr));

	gk20a_writel(g, gr_gpc0_tpc0_mpc_hww_esr_r() + offset,
		     gr_gpc0_tpc0_mpc_hww_esr_reset_trigger_f());

	return 0;
}

/* Lazily-initialized table of SM DSM perf override registers. */
static const u32 _num_ovr_perf_regs = 20;
static u32 _ovr_perf_regs[20] = { 0, };

/*
 * Populate _ovr_perf_regs with the EGPC/ETPC SM DSM perf counter control
 * and counter register addresses.  Idempotent: returns early once the
 * first slot is non-zero.
 */
void gv11b_gr_init_ovr_sm_dsm_perf(void)
{
	if (_ovr_perf_regs[0] != 0)
		return;

	_ovr_perf_regs[0] = gr_egpc0_etpc0_sm_dsm_perf_counter_control_sel0_r();
	_ovr_perf_regs[1] = gr_egpc0_etpc0_sm_dsm_perf_counter_control_sel1_r();
	_ovr_perf_regs[2] = gr_egpc0_etpc0_sm_dsm_perf_counter_control0_r();
	_ovr_perf_regs[3] = gr_egpc0_etpc0_sm_dsm_perf_counter_control1_r();
	_ovr_perf_regs[4] = gr_egpc0_etpc0_sm_dsm_perf_counter_control2_r();
	_ovr_perf_regs[5] = gr_egpc0_etpc0_sm_dsm_perf_counter_control3_r();
	_ovr_perf_regs[6] = gr_egpc0_etpc0_sm_dsm_perf_counter_control4_r();
	_ovr_perf_regs[7] = gr_egpc0_etpc0_sm_dsm_perf_counter_control5_r();
	_ovr_perf_regs[8] = gr_egpc0_etpc0_sm_dsm_perf_counter0_control_r();
	_ovr_perf_regs[9] = gr_egpc0_etpc0_sm_dsm_perf_counter1_control_r();
	_ovr_perf_regs[10] = gr_egpc0_etpc0_sm_dsm_perf_counter2_control_r();
	_ovr_perf_regs[11] = gr_egpc0_etpc0_sm_dsm_perf_counter3_control_r();
	_ovr_perf_regs[12] = gr_egpc0_etpc0_sm_dsm_perf_counter4_control_r();
	_ovr_perf_regs[13] = gr_egpc0_etpc0_sm_dsm_perf_counter5_control_r();
_ovr_perf_regs[14] = gr_egpc0_etpc0_sm_dsm_perf_counter6_control_r();
	_ovr_perf_regs[15] = gr_egpc0_etpc0_sm_dsm_perf_counter7_control_r();

	_ovr_perf_regs[16] = gr_egpc0_etpc0_sm0_dsm_perf_counter4_r();
	_ovr_perf_regs[17] = gr_egpc0_etpc0_sm0_dsm_perf_counter5_r();
	_ovr_perf_regs[18] = gr_egpc0_etpc0_sm0_dsm_perf_counter6_r();
	_ovr_perf_regs[19] = gr_egpc0_etpc0_sm0_dsm_perf_counter7_r();
}

/* Following are the blocks of registers that the ucode
 * stores in the extended region.
 */
/* == ctxsw_extended_sm_dsm_perf_counter_register_stride_v() ? */
static const u32 _num_sm_dsm_perf_regs;
/* == ctxsw_extended_sm_dsm_perf_counter_control_register_stride_v() ?*/
static const u32 _num_sm_dsm_perf_ctrl_regs = 2;
static u32 *_sm_dsm_perf_regs;
static u32 _sm_dsm_perf_ctrl_regs[2];

/*
 * Populate the (two-entry) SM DSM perf control register table.
 * Idempotent: returns early once the first slot is non-zero.
 */
void gv11b_gr_init_sm_dsm_reg_info(void)
{
	if (_sm_dsm_perf_ctrl_regs[0] != 0)
		return;

	_sm_dsm_perf_ctrl_regs[0] =
			      gr_egpc0_etpc0_sm_dsm_perf_counter_control0_r();
	_sm_dsm_perf_ctrl_regs[1] =
			      gr_egpc0_etpc0_sm_dsm_perf_counter_control5_r();
}

/*
 * Export the SM DSM perf register table and its ctxsw stride.
 * Note _num_sm_dsm_perf_regs is 0 and _sm_dsm_perf_regs is NULL on this
 * chip (no per-counter registers in the extended region).
 */
void gv11b_gr_get_sm_dsm_perf_regs(struct gk20a *g,
					  u32 *num_sm_dsm_perf_regs,
					  u32 **sm_dsm_perf_regs,
					  u32 *perf_register_stride)
{
	*num_sm_dsm_perf_regs = _num_sm_dsm_perf_regs;
	*sm_dsm_perf_regs = _sm_dsm_perf_regs;
	*perf_register_stride =
		ctxsw_prog_extended_sm_dsm_perf_counter_register_stride_v();
}

/* Export the SM DSM perf control register table and its ctxsw stride. */
void gv11b_gr_get_sm_dsm_perf_ctrl_regs(struct gk20a *g,
					       u32 *num_sm_dsm_perf_ctrl_regs,
					       u32 **sm_dsm_perf_ctrl_regs,
					       u32 *ctrl_register_stride)
{
	*num_sm_dsm_perf_ctrl_regs = _num_sm_dsm_perf_ctrl_regs;
	*sm_dsm_perf_ctrl_regs = _sm_dsm_perf_ctrl_regs;
	*ctrl_register_stride =
		ctxsw_prog_extended_sm_dsm_perf_counter_control_register_stride_v();
}

/* Export the override perf register table (see init_ovr_sm_dsm_perf). */
void gv11b_gr_get_ovr_perf_regs(struct gk20a *g, u32 *num_ovr_perf_regs,
					       u32 **ovr_perf_regs)
{
	*num_ovr_perf_regs = _num_ovr_perf_regs;
	*ovr_perf_regs = _ovr_perf_regs;
}

/*
 * Select which quad/half of the SMPC register at "offset" subsequent
 * reads will access: decode GPC/TPC from the pri offset, program the
 * quad select via halfctl_ctrl and the half select via
 * debug_sfe_control (read-modify-write from the unicast register,
 * written back broadcast).
 */
void gv11b_gr_access_smpc_reg(struct gk20a *g, u32 quad, u32 offset)
{
	u32 reg_val;
	u32 quad_ctrl;
	u32 half_ctrl;
	u32 tpc, gpc;
	u32 gpc_tpc_addr;
	u32 gpc_tpc_stride;
	u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
	u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g,
					GPU_LIT_TPC_IN_GPC_STRIDE);

	nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, "offset=0x%x", offset);

	gpc = pri_get_gpc_num(g, offset);
	gpc_tpc_addr = pri_gpccs_addr_mask(offset);
	tpc = g->ops.gr.get_tpc_num(g, gpc_tpc_addr);

	quad_ctrl = quad & 0x1; /* first bit tells us quad */
	half_ctrl = (quad >> 1) & 0x1; /* second bit tells us half */

	gpc_tpc_stride = gpc * gpc_stride + tpc * tpc_in_gpc_stride;
	gpc_tpc_addr = gr_gpc0_tpc0_sm_halfctl_ctrl_r() + gpc_tpc_stride;

	/* read from unicast reg */
	reg_val = gk20a_readl(g, gpc_tpc_addr);
	reg_val = set_field(reg_val,
		gr_gpcs_tpcs_sm_halfctl_ctrl_sctl_read_quad_ctl_m(),
		gr_gpcs_tpcs_sm_halfctl_ctrl_sctl_read_quad_ctl_f(quad_ctrl));

	/* write to broadcast reg */
	gk20a_writel(g, gr_gpcs_tpcs_sm_halfctl_ctrl_r(), reg_val);

	gpc_tpc_addr = gr_gpc0_tpc0_sm_debug_sfe_control_r() + gpc_tpc_stride;
	reg_val = gk20a_readl(g, gpc_tpc_addr);
	reg_val = set_field(reg_val,
		gr_gpcs_tpcs_sm_debug_sfe_control_read_half_ctl_m(),
		gr_gpcs_tpcs_sm_debug_sfe_control_read_half_ctl_f(half_ctrl));

	/* write to broadcast reg */
	gk20a_writel(g, gr_gpcs_tpcs_sm_debug_sfe_control_r(), reg_val);
}

/*
 * True when "addr" lies in the shared (broadcast) EGPC pri window,
 * i.e. [EGPC_PRI_SHARED_BASE, EGPC_PRI_SHARED_BASE + gpc_stride).
 */
static bool pri_is_egpc_addr_shared(struct gk20a *g, u32 addr)
{
	u32 egpc_shared_base = EGPC_PRI_SHARED_BASE;
	u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);

	return (addr >= egpc_shared_base) &&
		(addr < egpc_shared_base + gpc_stride);
}

/*
 * True when "addr" falls in any EGPC pri range (per-GPC unicast windows).
 * (Definition continues past this chunk of the file.)
 */
bool gv11b_gr_pri_is_egpc_addr(struct gk20a *g, u32 addr)
{
	u32 egpc_base = g->ops.gr.get_egpc_base(g);
	u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE);
	u32 num_gpcs = nvgpu_get_litter_value(g, GPU_LIT_NUM_GPCS);

	return ((addr >= egpc_base) &&
		(addr < egpc_base + num_gpcs *
gpc_stride)) || + pri_is_egpc_addr_shared(g, addr); +} + +static inline u32 pri_smpc_in_etpc_addr_mask(struct gk20a *g, u32 addr) +{ + u32 smpc_stride = nvgpu_get_litter_value(g, + GPU_LIT_SMPC_PRI_STRIDE); + + return (addr & (smpc_stride - 1)); +} + +static u32 pri_smpc_ext_addr(struct gk20a *g, u32 sm_offset, u32 gpc_num, + u32 tpc_num, u32 sm_num) +{ + u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE); + u32 tpc_in_gpc_base = nvgpu_get_litter_value(g, + GPU_LIT_TPC_IN_GPC_BASE); + u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g, + GPU_LIT_TPC_IN_GPC_STRIDE); + u32 egpc_base = g->ops.gr.get_egpc_base(g); + u32 smpc_unique_base = nvgpu_get_litter_value(g, + GPU_LIT_SMPC_PRI_UNIQUE_BASE); + u32 smpc_stride = nvgpu_get_litter_value(g, + GPU_LIT_SMPC_PRI_STRIDE); + + return (egpc_base + (gpc_num * gpc_stride) + tpc_in_gpc_base + + (tpc_num * tpc_in_gpc_stride) + + (sm_num * smpc_stride) + + (smpc_unique_base + sm_offset)); +} + +static bool pri_is_smpc_addr_in_etpc_shared(struct gk20a *g, u32 addr) +{ + u32 smpc_shared_base = nvgpu_get_litter_value(g, + GPU_LIT_SMPC_PRI_SHARED_BASE); + u32 smpc_stride = nvgpu_get_litter_value(g, + GPU_LIT_SMPC_PRI_STRIDE); + + return (addr >= smpc_shared_base) && + (addr < smpc_shared_base + smpc_stride); +} + +bool gv11b_gr_pri_is_etpc_addr(struct gk20a *g, u32 addr) +{ + u32 egpc_addr = 0; + + if (g->ops.gr.is_egpc_addr(g, addr)) { + egpc_addr = pri_gpccs_addr_mask(addr); + if (g->ops.gr.is_tpc_addr(g, egpc_addr)) + return true; + } + + return false; +} + +static u32 pri_get_egpc_num(struct gk20a *g, u32 addr) +{ + u32 i, start; + u32 egpc_base = g->ops.gr.get_egpc_base(g); + u32 num_gpcs = nvgpu_get_litter_value(g, GPU_LIT_NUM_GPCS); + u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE); + + for (i = 0; i < num_gpcs; i++) { + start = egpc_base + (i * gpc_stride); + if ((addr >= start) && (addr < (start + gpc_stride))) + return i; + } + return 0; +} + +static u32 pri_egpc_addr(struct gk20a *g, u32 addr, u32 
gpc) +{ + u32 egpc_base = g->ops.gr.get_egpc_base(g); + u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE); + + return egpc_base + (gpc * gpc_stride) + addr; +} + +static u32 pri_etpc_addr(struct gk20a *g, u32 addr, u32 gpc, u32 tpc) +{ + u32 egpc_base = g->ops.gr.get_egpc_base(g); + u32 gpc_stride = nvgpu_get_litter_value(g, GPU_LIT_GPC_STRIDE); + u32 tpc_in_gpc_base = nvgpu_get_litter_value(g, + GPU_LIT_TPC_IN_GPC_BASE); + u32 tpc_in_gpc_stride = nvgpu_get_litter_value(g, + GPU_LIT_TPC_IN_GPC_STRIDE); + + return egpc_base + (gpc * gpc_stride) + + tpc_in_gpc_base + (tpc * tpc_in_gpc_stride) + + addr; +} + +void gv11b_gr_get_egpc_etpc_num(struct gk20a *g, u32 addr, + u32 *egpc_num, u32 *etpc_num) +{ + u32 egpc_addr = 0; + + *egpc_num = pri_get_egpc_num(g, addr); + egpc_addr = pri_gpccs_addr_mask(addr); + *etpc_num = g->ops.gr.get_tpc_num(g, egpc_addr); + + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, + "egpc_num = %d etpc_num = %d", *egpc_num, *etpc_num); +} + +int gv11b_gr_decode_egpc_addr(struct gk20a *g, u32 addr, int *addr_type, + u32 *gpc_num, u32 *tpc_num, u32 *broadcast_flags) +{ + u32 gpc_addr; + u32 tpc_addr; + + if (g->ops.gr.is_egpc_addr(g, addr)) { + nvgpu_log_info(g, "addr=0x%x is egpc", addr); + + *addr_type = CTXSW_ADDR_TYPE_EGPC; + gpc_addr = pri_gpccs_addr_mask(addr); + if (pri_is_egpc_addr_shared(g, addr)) { + *broadcast_flags |= PRI_BROADCAST_FLAGS_EGPC; + *gpc_num = 0; + nvgpu_log_info(g, "shared egpc"); + } else { + *gpc_num = pri_get_egpc_num(g, addr); + nvgpu_log_info(g, "gpc=0x%x", *gpc_num); + } + if (g->ops.gr.is_tpc_addr(g, gpc_addr)) { + nvgpu_log_info(g, "addr=0x%x is etpc", addr); + *addr_type = CTXSW_ADDR_TYPE_ETPC; + if (pri_is_tpc_addr_shared(g, gpc_addr)) { + *broadcast_flags |= PRI_BROADCAST_FLAGS_ETPC; + *tpc_num = 0; + nvgpu_log_info(g, "shared etpc"); + } else { + *tpc_num = g->ops.gr.get_tpc_num(g, gpc_addr); + nvgpu_log_info(g, "tpc=0x%x", *tpc_num); + } + tpc_addr = pri_tpccs_addr_mask(addr); + if 
(pri_is_smpc_addr_in_etpc_shared(g, tpc_addr)) + *broadcast_flags |= PRI_BROADCAST_FLAGS_SMPC; + } + + nvgpu_log(g, gpu_dbg_fn | gpu_dbg_gpu_dbg, + "addr_type = %d, broadcast_flags = %#08x", + *addr_type, *broadcast_flags); + return 0; + } + return -EINVAL; +} + +static void gv11b_gr_update_priv_addr_table_smpc(struct gk20a *g, u32 gpc_num, + u32 tpc_num, u32 addr, + u32 *priv_addr_table, u32 *t) +{ + u32 sm_per_tpc, sm_num; + + nvgpu_log_info(g, "broadcast flags smpc"); + + sm_per_tpc = nvgpu_get_litter_value(g, GPU_LIT_NUM_SM_PER_TPC); + for (sm_num = 0; sm_num < sm_per_tpc; sm_num++) { + priv_addr_table[*t] = pri_smpc_ext_addr(g, + pri_smpc_in_etpc_addr_mask(g, addr), + gpc_num, tpc_num, sm_num); + nvgpu_log_info(g, "priv_addr_table[%d]:%#08x", + *t, priv_addr_table[*t]); + (*t)++; + } +} + +void gv11b_gr_egpc_etpc_priv_addr_table(struct gk20a *g, u32 addr, + u32 gpc, u32 broadcast_flags, u32 *priv_addr_table, u32 *t) +{ + u32 gpc_num, tpc_num; + + nvgpu_log_info(g, "addr=0x%x", addr); + + /* The GPC/TPC unicast registers are included in the compressed PRI + * tables. Convert a GPC/TPC broadcast address to unicast addresses so + * that we can look up the offsets. 
+ */ + if (broadcast_flags & PRI_BROADCAST_FLAGS_EGPC) { + nvgpu_log_info(g, "broadcast flags egpc"); + for (gpc_num = 0; gpc_num < g->gr.gpc_count; gpc_num++) { + + if (broadcast_flags & PRI_BROADCAST_FLAGS_ETPC) { + nvgpu_log_info(g, "broadcast flags etpc"); + for (tpc_num = 0; + tpc_num < g->gr.gpc_tpc_count[gpc_num]; + tpc_num++) { + if (broadcast_flags & + PRI_BROADCAST_FLAGS_SMPC) { + gv11b_gr_update_priv_addr_table_smpc( + g, gpc_num, tpc_num, addr, + priv_addr_table, t); + } else { + priv_addr_table[*t] = + pri_etpc_addr(g, + pri_tpccs_addr_mask(addr), + gpc_num, tpc_num); + nvgpu_log_info(g, + "priv_addr_table[%d]:%#08x", + *t, priv_addr_table[*t]); + (*t)++; + } + } + } else if (broadcast_flags & PRI_BROADCAST_FLAGS_SMPC) { + tpc_num = 0; + gv11b_gr_update_priv_addr_table_smpc( + g, gpc_num, tpc_num, addr, + priv_addr_table, t); + } else { + priv_addr_table[*t] = + pri_egpc_addr(g, + pri_gpccs_addr_mask(addr), + gpc_num); + nvgpu_log_info(g, "priv_addr_table[%d]:%#08x", + *t, priv_addr_table[*t]); + (*t)++; + } + } + } else if (!(broadcast_flags & PRI_BROADCAST_FLAGS_EGPC)) { + if (broadcast_flags & PRI_BROADCAST_FLAGS_ETPC) { + nvgpu_log_info(g, "broadcast flags etpc but not egpc"); + gpc_num = 0; + for (tpc_num = 0; + tpc_num < g->gr.gpc_tpc_count[gpc]; + tpc_num++) { + if (broadcast_flags & + PRI_BROADCAST_FLAGS_SMPC) + gv11b_gr_update_priv_addr_table_smpc( + g, gpc_num, tpc_num, addr, + priv_addr_table, t); + else { + priv_addr_table[*t] = + pri_etpc_addr(g, + pri_tpccs_addr_mask(addr), + gpc, tpc_num); + nvgpu_log_info(g, + "priv_addr_table[%d]:%#08x", + *t, priv_addr_table[*t]); + (*t)++; + } + } + } else if (broadcast_flags & PRI_BROADCAST_FLAGS_SMPC) { + tpc_num = 0; + gpc_num = 0; + gv11b_gr_update_priv_addr_table_smpc( + g, gpc_num, tpc_num, addr, + priv_addr_table, t); + } else { + priv_addr_table[*t] = addr; + nvgpu_log_info(g, "priv_addr_table[%d]:%#08x", + *t, priv_addr_table[*t]); + (*t)++; + } + } +} + +u32 gv11b_gr_get_egpc_base(struct 
gk20a *g) +{ + return EGPC_PRI_BASE; +} + +void gr_gv11b_init_gpc_mmu(struct gk20a *g) +{ + u32 temp; + + nvgpu_log_info(g, "initialize gpc mmu"); + + if (!nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY)) { + /* Bypass MMU check for non-secure boot. For + * secure-boot,this register write has no-effect */ + gk20a_writel(g, fb_priv_mmu_phy_secure_r(), 0xffffffff); + } + temp = gk20a_readl(g, fb_mmu_ctrl_r()); + temp &= gr_gpcs_pri_mmu_ctrl_vm_pg_size_m() | + gr_gpcs_pri_mmu_ctrl_use_pdb_big_page_size_m() | + gr_gpcs_pri_mmu_ctrl_vol_fault_m() | + gr_gpcs_pri_mmu_ctrl_comp_fault_m() | + gr_gpcs_pri_mmu_ctrl_miss_gran_m() | + gr_gpcs_pri_mmu_ctrl_cache_mode_m() | + gr_gpcs_pri_mmu_ctrl_mmu_aperture_m() | + gr_gpcs_pri_mmu_ctrl_mmu_vol_m() | + gr_gpcs_pri_mmu_ctrl_mmu_disable_m(); + gk20a_writel(g, gr_gpcs_pri_mmu_ctrl_r(), temp); + gk20a_writel(g, gr_gpcs_pri_mmu_pm_unit_mask_r(), 0); + gk20a_writel(g, gr_gpcs_pri_mmu_pm_req_mask_r(), 0); + + gk20a_writel(g, gr_gpcs_pri_mmu_debug_ctrl_r(), + gk20a_readl(g, fb_mmu_debug_ctrl_r())); + gk20a_writel(g, gr_gpcs_pri_mmu_debug_wr_r(), + gk20a_readl(g, fb_mmu_debug_wr_r())); + gk20a_writel(g, gr_gpcs_pri_mmu_debug_rd_r(), + gk20a_readl(g, fb_mmu_debug_rd_r())); +} diff --git a/drivers/gpu/nvgpu/gv11b/gr_gv11b.h b/drivers/gpu/nvgpu/gv11b/gr_gv11b.h new file mode 100644 index 000000000..b6ba231e2 --- /dev/null +++ b/drivers/gpu/nvgpu/gv11b/gr_gv11b.h @@ -0,0 +1,215 @@ +/* + * GV11B GPU GR + * + * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _NVGPU_GR_GV11B_H_ +#define _NVGPU_GR_GV11B_H_ + +#define EGPC_PRI_BASE 0x580000 +#define EGPC_PRI_SHARED_BASE 0x480000 + +#define PRI_BROADCAST_FLAGS_SMPC BIT(17) + +#define GV11B_ZBC_TYPE_STENCIL T19X_ZBC +#define ZBC_STENCIL_CLEAR_FMT_INVAILD 0 +#define ZBC_STENCIL_CLEAR_FMT_U8 1 + +struct zbc_s_table { + u32 stencil; + u32 format; + u32 ref_cnt; +}; + +struct gk20a; +struct zbc_entry; +struct zbc_query_params; +struct channel_ctx_gk20a; +struct nvgpu_warpstate; +struct nvgpu_gr_sm_error_state; + +enum { + VOLTA_CHANNEL_GPFIFO_A = 0xC36F, + VOLTA_A = 0xC397, + VOLTA_COMPUTE_A = 0xC3C0, + VOLTA_DMA_COPY_A = 0xC3B5, +}; + +#define NVC397_SET_SHADER_EXCEPTIONS 0x1528 +#define NVC397_SET_CIRCULAR_BUFFER_SIZE 0x1280 +#define NVC397_SET_ALPHA_CIRCULAR_BUFFER_SIZE 0x02dc +#define NVC397_SET_GO_IDLE_TIMEOUT 0x022c +#define NVC397_SET_TEX_IN_DBG 0x10bc +#define NVC397_SET_SKEDCHECK 0x10c0 +#define NVC397_SET_BES_CROP_DEBUG3 0x10c4 + +#define NVC397_SET_TEX_IN_DBG_TSL1_RVCH_INVALIDATE 0x1 +#define NVC397_SET_TEX_IN_DBG_SM_L1TAG_CTRL_CACHE_SURFACE_LD 0x2 +#define NVC397_SET_TEX_IN_DBG_SM_L1TAG_CTRL_CACHE_SURFACE_ST 0x4 + +#define NVC397_SET_SKEDCHECK_18_MASK 0x3 +#define NVC397_SET_SKEDCHECK_18_DEFAULT 0x0 +#define NVC397_SET_SKEDCHECK_18_DISABLE 0x1 +#define NVC397_SET_SKEDCHECK_18_ENABLE 0x2 + +#define NVC3C0_SET_SKEDCHECK 0x23c + +#define NVA297_SET_SHADER_EXCEPTIONS_ENABLE_FALSE 0 + +int gr_gv11b_alloc_buffer(struct vm_gk20a *vm, size_t size, + struct nvgpu_mem *mem); +/*zcull*/ +void gr_gv11b_program_zcull_mapping(struct gk20a *g, u32 zcull_num_entries, + u32 *zcull_map_tiles); +void gr_gv11b_create_sysfs(struct gk20a *g); + +bool gr_gv11b_is_valid_class(struct gk20a *g, u32 class_num); +bool gr_gv11b_is_valid_gfx_class(struct gk20a *g, u32 class_num); +bool gr_gv11b_is_valid_compute_class(struct gk20a *g, u32 class_num); +void gr_gv11b_enable_hww_exceptions(struct gk20a *g); +void gr_gv11b_enable_exceptions(struct gk20a *g); +int 
gr_gv11b_handle_tpc_sm_ecc_exception(struct gk20a *g, + u32 gpc, u32 tpc, + bool *post_event, struct channel_gk20a *fault_ch, + u32 *hww_global_esr); +int gr_gv11b_handle_gcc_exception(struct gk20a *g, u32 gpc, u32 tpc, + bool *post_event, struct channel_gk20a *fault_ch, + u32 *hww_global_esr); +int gr_gv11b_handle_gpc_gpcmmu_exception(struct gk20a *g, u32 gpc, + u32 gpc_exception); +int gr_gv11b_handle_gpc_gpccs_exception(struct gk20a *g, u32 gpc, + u32 gpc_exception); +void gr_gv11b_enable_gpc_exceptions(struct gk20a *g); +int gr_gv11b_handle_tex_exception(struct gk20a *g, u32 gpc, u32 tpc, + bool *post_event); +int gr_gv11b_zbc_s_query_table(struct gk20a *g, struct gr_gk20a *gr, + struct zbc_query_params *query_params); +bool gr_gv11b_add_zbc_type_s(struct gk20a *g, struct gr_gk20a *gr, + struct zbc_entry *zbc_val, int *ret_val); +int gr_gv11b_add_zbc_stencil(struct gk20a *g, struct gr_gk20a *gr, + struct zbc_entry *stencil_val, u32 index); +int gr_gv11b_load_stencil_default_tbl(struct gk20a *g, + struct gr_gk20a *gr); +int gr_gv11b_load_stencil_tbl(struct gk20a *g, struct gr_gk20a *gr); +u32 gr_gv11b_pagepool_default_size(struct gk20a *g); +int gr_gv11b_calc_global_ctx_buffer_size(struct gk20a *g); +int gr_gv11b_handle_sw_method(struct gk20a *g, u32 addr, + u32 class_num, u32 offset, u32 data); +void gr_gv11b_bundle_cb_defaults(struct gk20a *g); +void gr_gv11b_cb_size_default(struct gk20a *g); +void gr_gv11b_set_alpha_circular_buffer_size(struct gk20a *g, u32 data); +void gr_gv11b_set_circular_buffer_size(struct gk20a *g, u32 data); +int gr_gv11b_dump_gr_status_regs(struct gk20a *g, + struct gk20a_debug_output *o); +int gr_gv11b_wait_empty(struct gk20a *g, unsigned long duration_ms, + u32 expect_delay); +void gr_gv11b_commit_global_attrib_cb(struct gk20a *g, + struct channel_ctx_gk20a *ch_ctx, + u64 addr, bool patch); +void gr_gv11b_set_gpc_tpc_mask(struct gk20a *g, u32 gpc_index); +void gr_gv11b_get_access_map(struct gk20a *g, + u32 **whitelist, int 
*num_entries); +int gr_gv11b_pre_process_sm_exception(struct gk20a *g, + u32 gpc, u32 tpc, u32 sm, u32 global_esr, u32 warp_esr, + bool sm_debugger_attached, struct channel_gk20a *fault_ch, + bool *early_exit, bool *ignore_debugger); +int gr_gv11b_handle_fecs_error(struct gk20a *g, + struct channel_gk20a *__ch, + struct gr_gk20a_isr_data *isr_data); +int gr_gv11b_setup_rop_mapping(struct gk20a *g, struct gr_gk20a *gr); +int gr_gv11b_init_sw_veid_bundle(struct gk20a *g); +void gr_gv11b_detect_sm_arch(struct gk20a *g); +void gr_gv11b_program_sm_id_numbering(struct gk20a *g, + u32 gpc, u32 tpc, u32 smid); +int gr_gv11b_load_smid_config(struct gk20a *g); +int gr_gv11b_commit_inst(struct channel_gk20a *c, u64 gpu_va); +int gr_gv11b_commit_global_timeslice(struct gk20a *g, struct channel_gk20a *c); +void gr_gv11b_write_zcull_ptr(struct gk20a *g, + struct nvgpu_mem *mem, u64 gpu_va); +void gr_gv11b_write_pm_ptr(struct gk20a *g, + struct nvgpu_mem *mem, u64 gpu_va); +void gr_gv11b_init_elcg_mode(struct gk20a *g, u32 mode, u32 engine); +void gr_gv11b_load_tpc_mask(struct gk20a *g); +void gr_gv11b_set_preemption_buffer_va(struct gk20a *g, + struct nvgpu_mem *mem, u64 gpu_va); +int gr_gv11b_init_fs_state(struct gk20a *g); +void gv11b_gr_get_esr_sm_sel(struct gk20a *g, u32 gpc, u32 tpc, + u32 *esr_sm_sel); +int gv11b_gr_sm_trigger_suspend(struct gk20a *g); +void gv11b_gr_bpt_reg_info(struct gk20a *g, struct nvgpu_warpstate *w_state); +int gv11b_gr_update_sm_error_state(struct gk20a *g, + struct channel_gk20a *ch, u32 sm_id, + struct nvgpu_gr_sm_error_state *sm_error_state); +int gv11b_gr_set_sm_debug_mode(struct gk20a *g, + struct channel_gk20a *ch, u64 sms, bool enable); +int gv11b_gr_record_sm_error_state(struct gk20a *g, u32 gpc, u32 tpc); +void gv11b_gr_set_hww_esr_report_mask(struct gk20a *g); +bool gv11b_gr_sm_debugger_attached(struct gk20a *g); +void gv11b_gr_suspend_single_sm(struct gk20a *g, + u32 gpc, u32 tpc, u32 sm, + u32 global_esr_mask, bool check_errors); +void 
gv11b_gr_suspend_all_sms(struct gk20a *g, + u32 global_esr_mask, bool check_errors); +void gv11b_gr_resume_single_sm(struct gk20a *g, + u32 gpc, u32 tpc, u32 sm); +void gv11b_gr_resume_all_sms(struct gk20a *g); +int gv11b_gr_resume_from_pause(struct gk20a *g); +u32 gv11b_gr_get_sm_hww_warp_esr(struct gk20a *g, + u32 gpc, u32 tpc, u32 sm); +u32 gv11b_gr_get_sm_hww_global_esr(struct gk20a *g, + u32 gpc, u32 tpc, u32 sm); +u32 gv11b_gr_get_sm_no_lock_down_hww_global_esr_mask(struct gk20a *g); +int gv11b_gr_wait_for_sm_lock_down(struct gk20a *g, + u32 gpc, u32 tpc, u32 sm, + u32 global_esr_mask, bool check_errors); +int gv11b_gr_lock_down_sm(struct gk20a *g, + u32 gpc, u32 tpc, u32 sm, u32 global_esr_mask, + bool check_errors); +void gv11b_gr_clear_sm_hww(struct gk20a *g, u32 gpc, u32 tpc, u32 sm, + u32 global_esr); +int gr_gv11b_handle_tpc_mpc_exception(struct gk20a *g, + u32 gpc, u32 tpc, bool *post_event); +void gv11b_gr_init_ovr_sm_dsm_perf(void); +void gv11b_gr_init_sm_dsm_reg_info(void); +void gv11b_gr_get_sm_dsm_perf_regs(struct gk20a *g, + u32 *num_sm_dsm_perf_regs, + u32 **sm_dsm_perf_regs, + u32 *perf_register_stride); +void gv11b_gr_get_sm_dsm_perf_ctrl_regs(struct gk20a *g, + u32 *num_sm_dsm_perf_ctrl_regs, + u32 **sm_dsm_perf_ctrl_regs, + u32 *ctrl_register_stride); +void gv11b_gr_get_ovr_perf_regs(struct gk20a *g, u32 *num_ovr_perf_regs, + u32 **ovr_perf_regs); +void gv11b_gr_access_smpc_reg(struct gk20a *g, u32 quad, u32 offset); +bool gv11b_gr_pri_is_egpc_addr(struct gk20a *g, u32 addr); +bool gv11b_gr_pri_is_etpc_addr(struct gk20a *g, u32 addr); +void gv11b_gr_get_egpc_etpc_num(struct gk20a *g, u32 addr, + u32 *egpc_num, u32 *etpc_num); +int gv11b_gr_decode_egpc_addr(struct gk20a *g, u32 addr, int *addr_type, + u32 *gpc_num, u32 *tpc_num, u32 *broadcast_flags); +void gv11b_gr_egpc_etpc_priv_addr_table(struct gk20a *g, u32 addr, + u32 gpc, u32 broadcast_flags, u32 *priv_addr_table, u32 *t); +u32 gv11b_gr_get_egpc_base(struct gk20a *g); +void 
gr_gv11b_init_gpc_mmu(struct gk20a *g); + +#endif diff --git a/drivers/gpu/nvgpu/gv11b/gv11b.c b/drivers/gpu/nvgpu/gv11b/gv11b.c new file mode 100644 index 000000000..211755e53 --- /dev/null +++ b/drivers/gpu/nvgpu/gv11b/gv11b.c @@ -0,0 +1,38 @@ +/* + * GV11B Graphics + * + * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include +#include + +#include "gk20a/gk20a.h" + +#include "gv11b/gv11b.h" + +int gv11b_init_gpu_characteristics(struct gk20a *g) +{ + gk20a_init_gpu_characteristics(g); + __nvgpu_set_enabled(g, NVGPU_SUPPORT_TSG_SUBCONTEXTS, true); + __nvgpu_set_enabled(g, NVGPU_SUPPORT_IO_COHERENCE, true); + return 0; +} diff --git a/drivers/gpu/nvgpu/gv11b/gv11b.h b/drivers/gpu/nvgpu/gv11b/gv11b.h new file mode 100644 index 000000000..3d5490e65 --- /dev/null +++ b/drivers/gpu/nvgpu/gv11b/gv11b.h @@ -0,0 +1,32 @@ +/* + * GV11B Graphics + * + * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef GV11B_H +#define GV11B_H + +#include "gk20a/gk20a.h" + +int gv11b_init_gpu_characteristics(struct gk20a *g); + +#endif /* GV11B_H */ diff --git a/drivers/gpu/nvgpu/gv11b/gv11b_gating_reglist.c b/drivers/gpu/nvgpu/gv11b/gv11b_gating_reglist.c new file mode 100644 index 000000000..9f6057ae4 --- /dev/null +++ b/drivers/gpu/nvgpu/gv11b/gv11b_gating_reglist.c @@ -0,0 +1,748 @@ +/* + * Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * This file is autogenerated. Do not edit. 
+ */ + +#ifndef __gv11b_gating_reglist_h__ +#define __gv11b_gating_reglist_h__ + +#include +#include "gv11b_gating_reglist.h" +#include + +struct gating_desc { + u32 addr; + u32 prod; + u32 disable; +}; +/* slcg bus */ +static const struct gating_desc gv11b_slcg_bus[] = { + {.addr = 0x00001c04, .prod = 0x00000000, .disable = 0x000003fe}, +}; + +/* slcg ce2 */ +static const struct gating_desc gv11b_slcg_ce2[] = { + {.addr = 0x00104204, .prod = 0x00000040, .disable = 0x000007fe}, +}; + +/* slcg chiplet */ +static const struct gating_desc gv11b_slcg_chiplet[] = { + {.addr = 0x0010c07c, .prod = 0x00000000, .disable = 0x00000007}, + {.addr = 0x0010e07c, .prod = 0x00000000, .disable = 0x00000007}, + {.addr = 0x0010d07c, .prod = 0x00000000, .disable = 0x00000007}, + {.addr = 0x0010e17c, .prod = 0x00000000, .disable = 0x00000007}, +}; + +/* slcg fb */ +static const struct gating_desc gv11b_slcg_fb[] = { + {.addr = 0x00100d14, .prod = 0x00000000, .disable = 0xfffffffe}, + {.addr = 0x00100c9c, .prod = 0x00000000, .disable = 0x000001fe}, +}; + +/* slcg fifo */ +static const struct gating_desc gv11b_slcg_fifo[] = { + {.addr = 0x000026ec, .prod = 0x00000000, .disable = 0x0001fffe}, +}; + +/* slcg gr */ +static const struct gating_desc gv11b_slcg_gr[] = { + {.addr = 0x004041f4, .prod = 0x00000000, .disable = 0x07fffffe}, + {.addr = 0x00409134, .prod = 0x00020008, .disable = 0x0003fffe}, + {.addr = 0x00409894, .prod = 0x00000000, .disable = 0x0000fffe}, + {.addr = 0x004078c4, .prod = 0x00000000, .disable = 0x000001fe}, + {.addr = 0x00406004, .prod = 0x00000200, .disable = 0x0001fffe}, + {.addr = 0x00405864, .prod = 0x00000000, .disable = 0x000001fe}, + {.addr = 0x00405910, .prod = 0xfffffff0, .disable = 0xfffffffe}, + {.addr = 0x00408044, .prod = 0x00000000, .disable = 0x000007fe}, + {.addr = 0x00407004, .prod = 0x00000000, .disable = 0x000001fe}, + {.addr = 0x00405bf4, .prod = 0x00000000, .disable = 0x00000002}, + {.addr = 0x0041a134, .prod = 0x00020008, .disable = 0x0003fffe}, 
+ {.addr = 0x0041a894, .prod = 0x00000000, .disable = 0x0000fffe}, + {.addr = 0x00418504, .prod = 0x00000000, .disable = 0x0007fffe}, + {.addr = 0x0041860c, .prod = 0x00000000, .disable = 0x000001fe}, + {.addr = 0x0041868c, .prod = 0x00000000, .disable = 0x0000001e}, + {.addr = 0x0041871c, .prod = 0x00000000, .disable = 0x000003fe}, + {.addr = 0x00418388, .prod = 0x00000000, .disable = 0x00000001}, + {.addr = 0x0041882c, .prod = 0x00000000, .disable = 0x0001fffe}, + {.addr = 0x00418bc0, .prod = 0x00000000, .disable = 0x000001fe}, + {.addr = 0x00418974, .prod = 0x00000000, .disable = 0x0001fffe}, + {.addr = 0x00418c74, .prod = 0xffffff80, .disable = 0xfffffffe}, + {.addr = 0x00418cf4, .prod = 0xfffffff8, .disable = 0xfffffffe}, + {.addr = 0x00418d74, .prod = 0xffffffe0, .disable = 0xfffffffe}, + {.addr = 0x00418f10, .prod = 0xffffffe0, .disable = 0xfffffffe}, + {.addr = 0x00418e10, .prod = 0xfffffffe, .disable = 0xfffffffe}, + {.addr = 0x00419024, .prod = 0x000001fe, .disable = 0x000001fe}, + {.addr = 0x0041889c, .prod = 0x00000000, .disable = 0x000001fe}, + {.addr = 0x00419d24, .prod = 0x00000000, .disable = 0x000000ff}, + {.addr = 0x0041986c, .prod = 0x00000104, .disable = 0x00fffffe}, + {.addr = 0x00419c74, .prod = 0x0000001e, .disable = 0x0000001e}, + {.addr = 0x00419c84, .prod = 0x0003fff8, .disable = 0x0003fffe}, + {.addr = 0x00419c8c, .prod = 0xffffff84, .disable = 0xfffffffe}, + {.addr = 0x00419c94, .prod = 0x00080040, .disable = 0x000ffffe}, + {.addr = 0x00419ca4, .prod = 0x00003ffe, .disable = 0x00003ffe}, + {.addr = 0x00419cac, .prod = 0x0001fffe, .disable = 0x0001fffe}, + {.addr = 0x00419a44, .prod = 0x00000008, .disable = 0x0000000e}, + {.addr = 0x00419a4c, .prod = 0x000001f8, .disable = 0x000001fe}, + {.addr = 0x00419a54, .prod = 0x0000003c, .disable = 0x0000003e}, + {.addr = 0x00419a5c, .prod = 0x0000000c, .disable = 0x0000000e}, + {.addr = 0x00419a64, .prod = 0x000001ba, .disable = 0x000001fe}, + {.addr = 0x00419a7c, .prod = 0x0000003c, .disable = 
0x0000003e}, + {.addr = 0x00419a84, .prod = 0x0000000c, .disable = 0x0000000e}, + {.addr = 0x0041be2c, .prod = 0x04115fc0, .disable = 0xfffffffe}, + {.addr = 0x0041bfec, .prod = 0xfffffff0, .disable = 0xfffffffe}, + {.addr = 0x0041bed4, .prod = 0xfffffff8, .disable = 0xfffffffe}, + {.addr = 0x00408814, .prod = 0x00000000, .disable = 0x0001fffe}, + {.addr = 0x00408a84, .prod = 0x00000000, .disable = 0x0001fffe}, + {.addr = 0x004089ac, .prod = 0x00000000, .disable = 0x0001fffe}, + {.addr = 0x00408a24, .prod = 0x00000000, .disable = 0x000000ff}, +}; + +/* slcg ltc */ +static const struct gating_desc gv11b_slcg_ltc[] = { + {.addr = 0x0017e050, .prod = 0x00000000, .disable = 0xfffffffe}, + {.addr = 0x0017e35c, .prod = 0x00000000, .disable = 0xfffffffe}, +}; + +/* slcg perf */ +static const struct gating_desc gv11b_slcg_perf[] = { + {.addr = 0x00248018, .prod = 0xffffffff, .disable = 0x00000000}, + {.addr = 0x00248018, .prod = 0xffffffff, .disable = 0x00000000}, + {.addr = 0x00246018, .prod = 0xffffffff, .disable = 0x00000000}, + {.addr = 0x00246018, .prod = 0xffffffff, .disable = 0x00000000}, + {.addr = 0x00246018, .prod = 0xffffffff, .disable = 0x00000000}, + {.addr = 0x00244018, .prod = 0xffffffff, .disable = 0x00000000}, + {.addr = 0x00244018, .prod = 0xffffffff, .disable = 0x00000000}, + {.addr = 0x00244018, .prod = 0xffffffff, .disable = 0x00000000}, + {.addr = 0x0024a124, .prod = 0x00000001, .disable = 0x00000000}, +}; + +/* slcg PriRing */ +static const struct gating_desc gv11b_slcg_priring[] = { + {.addr = 0x001200a8, .prod = 0x00000000, .disable = 0x00000001}, +}; + +/* slcg pwr_csb */ +static const struct gating_desc gv11b_slcg_pwr_csb[] = { + {.addr = 0x00000134, .prod = 0x00020008, .disable = 0x0003fffe}, + {.addr = 0x00000e74, .prod = 0x00000000, .disable = 0x0000000f}, + {.addr = 0x00000a74, .prod = 0x00004040, .disable = 0x00007ffe}, + {.addr = 0x000206b8, .prod = 0x00000008, .disable = 0x0000000f}, +}; + +/* slcg pmu */ +static const struct gating_desc 
gv11b_slcg_pmu[] = { + {.addr = 0x0010a134, .prod = 0x00020008, .disable = 0x0003fffe}, + {.addr = 0x0010aa74, .prod = 0x00004040, .disable = 0x00007ffe}, + {.addr = 0x0010ae74, .prod = 0x00000000, .disable = 0x0000000f}, +}; + +/* therm gr */ +static const struct gating_desc gv11b_slcg_therm[] = { + {.addr = 0x000206b8, .prod = 0x00000008, .disable = 0x0000000f}, +}; + +/* slcg Xbar */ +static const struct gating_desc gv11b_slcg_xbar[] = { + {.addr = 0x0013c824, .prod = 0x00000000, .disable = 0x7ffffffe}, + {.addr = 0x0013dc08, .prod = 0x00000000, .disable = 0xfffffffe}, + {.addr = 0x0013c924, .prod = 0x00000000, .disable = 0x7ffffffe}, + {.addr = 0x0013cbe4, .prod = 0x00000000, .disable = 0x1ffffffe}, + {.addr = 0x0013cc04, .prod = 0x00000000, .disable = 0x1ffffffe}, +}; + +/* blcg bus */ +static const struct gating_desc gv11b_blcg_bus[] = { + {.addr = 0x00001c00, .prod = 0x00000042, .disable = 0x00000000}, +}; + +/* blcg ce */ +static const struct gating_desc gv11b_blcg_ce[] = { + {.addr = 0x00104200, .prod = 0x0000c242, .disable = 0x00000000}, +}; + +/* blcg ctxsw prog */ +static const struct gating_desc gv11b_blcg_ctxsw_prog[] = { +}; + +/* blcg fb */ +static const struct gating_desc gv11b_blcg_fb[] = { + {.addr = 0x00100d10, .prod = 0x0000c242, .disable = 0x00000000}, + {.addr = 0x00100d30, .prod = 0x0000c242, .disable = 0x00000000}, + {.addr = 0x00100d3c, .prod = 0x00000242, .disable = 0x00000000}, + {.addr = 0x00100d48, .prod = 0x0000c242, .disable = 0x00000000}, + {.addr = 0x00100d1c, .prod = 0x00000042, .disable = 0x00000000}, + {.addr = 0x00100c98, .prod = 0x00004242, .disable = 0x00000000}, +}; + +/* blcg fifo */ +static const struct gating_desc gv11b_blcg_fifo[] = { + {.addr = 0x000026e0, .prod = 0x0000c244, .disable = 0x00000000}, +}; + +/* blcg gr */ +static const struct gating_desc gv11b_blcg_gr[] = { + {.addr = 0x004041f0, .prod = 0x0000c646, .disable = 0x00000000}, + {.addr = 0x00409890, .prod = 0x0000007f, .disable = 0x00000000}, + {.addr = 
0x004098b0, .prod = 0x0000007f, .disable = 0x00000000}, + {.addr = 0x004078c0, .prod = 0x00004242, .disable = 0x00000000}, + {.addr = 0x00406000, .prod = 0x0000c444, .disable = 0x00000000}, + {.addr = 0x00405860, .prod = 0x0000c242, .disable = 0x00000000}, + {.addr = 0x0040590c, .prod = 0x0000c444, .disable = 0x00000000}, + {.addr = 0x00408040, .prod = 0x0000c444, .disable = 0x00000000}, + {.addr = 0x00407000, .prod = 0x4000c242, .disable = 0x00000000}, + {.addr = 0x00405bf0, .prod = 0x0000c444, .disable = 0x00000000}, + {.addr = 0x0041a890, .prod = 0x0000427f, .disable = 0x00000000}, + {.addr = 0x0041a8b0, .prod = 0x0000007f, .disable = 0x00000000}, + {.addr = 0x00418500, .prod = 0x0000c244, .disable = 0x00000000}, + {.addr = 0x00418608, .prod = 0x0000c242, .disable = 0x00000000}, + {.addr = 0x00418688, .prod = 0x0000c242, .disable = 0x00000000}, + {.addr = 0x00418718, .prod = 0x00000042, .disable = 0x00000000}, + {.addr = 0x00418828, .prod = 0x00008444, .disable = 0x00000000}, + {.addr = 0x00418bbc, .prod = 0x0000c242, .disable = 0x00000000}, + {.addr = 0x00418970, .prod = 0x0000c242, .disable = 0x00000000}, + {.addr = 0x00418c70, .prod = 0x0000c444, .disable = 0x00000000}, + {.addr = 0x00418cf0, .prod = 0x0000c444, .disable = 0x00000000}, + {.addr = 0x00418d70, .prod = 0x0000c444, .disable = 0x00000000}, + {.addr = 0x00418f0c, .prod = 0x0000c444, .disable = 0x00000000}, + {.addr = 0x00418e0c, .prod = 0x0000c444, .disable = 0x00000000}, + {.addr = 0x00419020, .prod = 0x0000c242, .disable = 0x00000000}, + {.addr = 0x00419038, .prod = 0x00000042, .disable = 0x00000000}, + {.addr = 0x00418898, .prod = 0x00004242, .disable = 0x00000000}, + {.addr = 0x00419868, .prod = 0x00008243, .disable = 0x00000000}, + {.addr = 0x00419c70, .prod = 0x0000c444, .disable = 0x00000000}, + {.addr = 0x00419c80, .prod = 0x00004045, .disable = 0x00000000}, + {.addr = 0x00419c88, .prod = 0x00004043, .disable = 0x00000000}, + {.addr = 0x00419c90, .prod = 0x0000004a, .disable = 0x00000000}, 
+ {.addr = 0x00419c98, .prod = 0x00000042, .disable = 0x00000000}, + {.addr = 0x00419ca0, .prod = 0x00000043, .disable = 0x00000000}, + {.addr = 0x00419ca8, .prod = 0x00000003, .disable = 0x00000000}, + {.addr = 0x00419cb0, .prod = 0x00000002, .disable = 0x00000000}, + {.addr = 0x00419a40, .prod = 0x00000242, .disable = 0x00000000}, + {.addr = 0x00419a48, .prod = 0x00000242, .disable = 0x00000000}, + {.addr = 0x00419a50, .prod = 0x00000242, .disable = 0x00000000}, + {.addr = 0x00419a58, .prod = 0x00000242, .disable = 0x00000000}, + {.addr = 0x00419a60, .prod = 0x00000202, .disable = 0x00000000}, + {.addr = 0x00419a68, .prod = 0x00000202, .disable = 0x00000000}, + {.addr = 0x00419a78, .prod = 0x00000242, .disable = 0x00000000}, + {.addr = 0x00419a80, .prod = 0x00000242, .disable = 0x00000000}, + {.addr = 0x0041be28, .prod = 0x00008242, .disable = 0x00000000}, + {.addr = 0x0041bfe8, .prod = 0x0000c444, .disable = 0x00000000}, + {.addr = 0x0041bed0, .prod = 0x0000c444, .disable = 0x00000000}, + {.addr = 0x00408810, .prod = 0x0000c242, .disable = 0x00000000}, + {.addr = 0x00408a80, .prod = 0x0000c242, .disable = 0x00000000}, + {.addr = 0x004089a8, .prod = 0x0000c242, .disable = 0x00000000}, +}; + +/* blcg ltc */ +static const struct gating_desc gv11b_blcg_ltc[] = { + {.addr = 0x0017e030, .prod = 0x00000044, .disable = 0x00000000}, + {.addr = 0x0017e040, .prod = 0x00000044, .disable = 0x00000000}, + {.addr = 0x0017e3e0, .prod = 0x00000044, .disable = 0x00000000}, + {.addr = 0x0017e3c8, .prod = 0x00000044, .disable = 0x00000000}, +}; + +/* blcg pwr_csb */ +static const struct gating_desc gv11b_blcg_pwr_csb[] = { + {.addr = 0x00000a70, .prod = 0x00000045, .disable = 0x00000000}, +}; + +/* blcg pmu */ +static const struct gating_desc gv11b_blcg_pmu[] = { + {.addr = 0x0010aa70, .prod = 0x00000045, .disable = 0x00000000}, +}; + +/* blcg Xbar */ +static const struct gating_desc gv11b_blcg_xbar[] = { + {.addr = 0x0013c820, .prod = 0x0001004a, .disable = 0x00000000}, + {.addr = 
0x0013dc04, .prod = 0x0001004a, .disable = 0x00000000}, + {.addr = 0x0013c920, .prod = 0x0000004a, .disable = 0x00000000}, + {.addr = 0x0013cbe0, .prod = 0x00000042, .disable = 0x00000000}, + {.addr = 0x0013cc00, .prod = 0x00000042, .disable = 0x00000000}, +}; + +/* pg gr: empty table -- the corresponding load routine performs no register writes */ +static const struct gating_desc gv11b_pg_gr[] = { +}; + +/* inline functions */ /* NOTE(review): the load_gating_prod helpers below are identical except for the table they walk; sizeof(tbl) / sizeof(struct gating_desc) would read better as the sizeof(tbl) / sizeof(tbl[0]) (ARRAY_SIZE) idiom, and a single shared helper taking (table, size) would remove the duplication */ +void gv11b_slcg_bus_load_gating_prod(struct gk20a *g, + bool prod) +{ + u32 i; + u32 size = sizeof(gv11b_slcg_bus) / sizeof(struct gating_desc); + + if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG)) + return; + + for (i = 0; i < size; i++) { + if (prod) + gk20a_writel(g, gv11b_slcg_bus[i].addr, + gv11b_slcg_bus[i].prod); + else + gk20a_writel(g, gv11b_slcg_bus[i].addr, + gv11b_slcg_bus[i].disable); + } +} + +void gv11b_slcg_ce2_load_gating_prod(struct gk20a *g, + bool prod) +{ + u32 i; + u32 size = sizeof(gv11b_slcg_ce2) / sizeof(struct gating_desc); + + if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG)) + return; + + for (i = 0; i < size; i++) { + if (prod) + gk20a_writel(g, gv11b_slcg_ce2[i].addr, + gv11b_slcg_ce2[i].prod); + else + gk20a_writel(g, gv11b_slcg_ce2[i].addr, + gv11b_slcg_ce2[i].disable); + } +} + +void gv11b_slcg_chiplet_load_gating_prod(struct gk20a *g, + bool prod) +{ + u32 i; + u32 size = sizeof(gv11b_slcg_chiplet) / sizeof(struct gating_desc); + + if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG)) + return; + + for (i = 0; i < size; i++) { + if (prod) + gk20a_writel(g, gv11b_slcg_chiplet[i].addr, + gv11b_slcg_chiplet[i].prod); + else + gk20a_writel(g, gv11b_slcg_chiplet[i].addr, + gv11b_slcg_chiplet[i].disable); + } +} + +void gv11b_slcg_ctxsw_firmware_load_gating_prod(struct gk20a *g, + bool prod) +{ /* intentionally a no-op -- NOTE(review): confirm gv11b has no SLCG ctxsw firmware registers to program */ +} + +void gv11b_slcg_fb_load_gating_prod(struct gk20a *g, + bool prod) +{ + u32 i; + u32 size = sizeof(gv11b_slcg_fb) / sizeof(struct gating_desc); + + if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG)) + return; + + for (i = 0; i < size; i++) { + if (prod) + gk20a_writel(g, gv11b_slcg_fb[i].addr, + 
gv11b_slcg_fb[i].prod); + else + gk20a_writel(g, gv11b_slcg_fb[i].addr, + gv11b_slcg_fb[i].disable); + } +} + +void gv11b_slcg_fifo_load_gating_prod(struct gk20a *g, + bool prod) +{ + u32 i; + u32 size = sizeof(gv11b_slcg_fifo) / sizeof(struct gating_desc); + + if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG)) + return; + + for (i = 0; i < size; i++) { + if (prod) + gk20a_writel(g, gv11b_slcg_fifo[i].addr, + gv11b_slcg_fifo[i].prod); + else + gk20a_writel(g, gv11b_slcg_fifo[i].addr, + gv11b_slcg_fifo[i].disable); + } +} + +void gr_gv11b_slcg_gr_load_gating_prod(struct gk20a *g, + bool prod) +{ + u32 i; + u32 size = sizeof(gv11b_slcg_gr) / sizeof(struct gating_desc); + + if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG)) + return; + + for (i = 0; i < size; i++) { + if (prod) + gk20a_writel(g, gv11b_slcg_gr[i].addr, + gv11b_slcg_gr[i].prod); + else + gk20a_writel(g, gv11b_slcg_gr[i].addr, + gv11b_slcg_gr[i].disable); + } +} + +void ltc_gv11b_slcg_ltc_load_gating_prod(struct gk20a *g, + bool prod) +{ + u32 i; + u32 size = sizeof(gv11b_slcg_ltc) / sizeof(struct gating_desc); + + if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG)) + return; + + for (i = 0; i < size; i++) { + if (prod) + gk20a_writel(g, gv11b_slcg_ltc[i].addr, + gv11b_slcg_ltc[i].prod); + else + gk20a_writel(g, gv11b_slcg_ltc[i].addr, + gv11b_slcg_ltc[i].disable); + } +} + +void gv11b_slcg_perf_load_gating_prod(struct gk20a *g, + bool prod) +{ + u32 i; + u32 size = sizeof(gv11b_slcg_perf) / sizeof(struct gating_desc); + + if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG)) + return; + + for (i = 0; i < size; i++) { + if (prod) + gk20a_writel(g, gv11b_slcg_perf[i].addr, + gv11b_slcg_perf[i].prod); + else + gk20a_writel(g, gv11b_slcg_perf[i].addr, + gv11b_slcg_perf[i].disable); + } +} + +void gv11b_slcg_priring_load_gating_prod(struct gk20a *g, + bool prod) +{ + u32 i; + u32 size = sizeof(gv11b_slcg_priring) / sizeof(struct gating_desc); + + if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG)) + return; + + for (i = 0; i < size; i++) 
{ + if (prod) + gk20a_writel(g, gv11b_slcg_priring[i].addr, + gv11b_slcg_priring[i].prod); + else + gk20a_writel(g, gv11b_slcg_priring[i].addr, + gv11b_slcg_priring[i].disable); + } +} + +void gv11b_slcg_pwr_csb_load_gating_prod(struct gk20a *g, + bool prod) +{ + u32 i; + u32 size = sizeof(gv11b_slcg_pwr_csb) / sizeof(struct gating_desc); + + if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG)) + return; + + for (i = 0; i < size; i++) { + if (prod) + gk20a_writel(g, gv11b_slcg_pwr_csb[i].addr, + gv11b_slcg_pwr_csb[i].prod); + else + gk20a_writel(g, gv11b_slcg_pwr_csb[i].addr, + gv11b_slcg_pwr_csb[i].disable); + } +} + +void gv11b_slcg_pmu_load_gating_prod(struct gk20a *g, + bool prod) +{ + u32 i; + u32 size = sizeof(gv11b_slcg_pmu) / sizeof(struct gating_desc); + + if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG)) + return; + + for (i = 0; i < size; i++) { + if (prod) + gk20a_writel(g, gv11b_slcg_pmu[i].addr, + gv11b_slcg_pmu[i].prod); + else + gk20a_writel(g, gv11b_slcg_pmu[i].addr, + gv11b_slcg_pmu[i].disable); + } +} + +void gv11b_slcg_therm_load_gating_prod(struct gk20a *g, + bool prod) +{ + u32 i; + u32 size = sizeof(gv11b_slcg_therm) / sizeof(struct gating_desc); + + if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG)) + return; + + for (i = 0; i < size; i++) { + if (prod) + gk20a_writel(g, gv11b_slcg_therm[i].addr, + gv11b_slcg_therm[i].prod); + else + gk20a_writel(g, gv11b_slcg_therm[i].addr, + gv11b_slcg_therm[i].disable); + } +} + +void gv11b_slcg_xbar_load_gating_prod(struct gk20a *g, + bool prod) +{ + u32 i; + u32 size = sizeof(gv11b_slcg_xbar) / sizeof(struct gating_desc); + + if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_SLCG)) + return; + + for (i = 0; i < size; i++) { + if (prod) + gk20a_writel(g, gv11b_slcg_xbar[i].addr, + gv11b_slcg_xbar[i].prod); + else + gk20a_writel(g, gv11b_slcg_xbar[i].addr, + gv11b_slcg_xbar[i].disable); + } +} + +void gv11b_blcg_bus_load_gating_prod(struct gk20a *g, + bool prod) +{ + u32 i; + u32 size = sizeof(gv11b_blcg_bus) / sizeof(struct 
gating_desc); + + if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG)) + return; + + for (i = 0; i < size; i++) { + if (prod) + gk20a_writel(g, gv11b_blcg_bus[i].addr, + gv11b_blcg_bus[i].prod); + else + gk20a_writel(g, gv11b_blcg_bus[i].addr, + gv11b_blcg_bus[i].disable); + } +} + +void gv11b_blcg_ce_load_gating_prod(struct gk20a *g, + bool prod) +{ + u32 i; + u32 size = sizeof(gv11b_blcg_ce) / sizeof(struct gating_desc); + + if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG)) + return; + + for (i = 0; i < size; i++) { + if (prod) + gk20a_writel(g, gv11b_blcg_ce[i].addr, + gv11b_blcg_ce[i].prod); + else + gk20a_writel(g, gv11b_blcg_ce[i].addr, + gv11b_blcg_ce[i].disable); + } +} + +void gv11b_blcg_ctxsw_firmware_load_gating_prod(struct gk20a *g, + bool prod) +{ + u32 i; + u32 size = sizeof(gv11b_blcg_ctxsw_prog) / sizeof(struct gating_desc); + + if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG)) + return; + + for (i = 0; i < size; i++) { + if (prod) + gk20a_writel(g, gv11b_blcg_ctxsw_prog[i].addr, + gv11b_blcg_ctxsw_prog[i].prod); + else + gk20a_writel(g, gv11b_blcg_ctxsw_prog[i].addr, + gv11b_blcg_ctxsw_prog[i].disable); + } +} + +void gv11b_blcg_fb_load_gating_prod(struct gk20a *g, + bool prod) +{ + u32 i; + u32 size = sizeof(gv11b_blcg_fb) / sizeof(struct gating_desc); + + if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG)) + return; + + for (i = 0; i < size; i++) { + if (prod) + gk20a_writel(g, gv11b_blcg_fb[i].addr, + gv11b_blcg_fb[i].prod); + else + gk20a_writel(g, gv11b_blcg_fb[i].addr, + gv11b_blcg_fb[i].disable); + } +} + +void gv11b_blcg_fifo_load_gating_prod(struct gk20a *g, + bool prod) +{ + u32 i; + u32 size = sizeof(gv11b_blcg_fifo) / sizeof(struct gating_desc); + + if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG)) + return; + + for (i = 0; i < size; i++) { + if (prod) + gk20a_writel(g, gv11b_blcg_fifo[i].addr, + gv11b_blcg_fifo[i].prod); + else + gk20a_writel(g, gv11b_blcg_fifo[i].addr, + gv11b_blcg_fifo[i].disable); + } +} + +void gv11b_blcg_gr_load_gating_prod(struct gk20a 
*g, + bool prod) +{ + u32 i; + u32 size = sizeof(gv11b_blcg_gr) / sizeof(struct gating_desc); + + if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG)) + return; + + for (i = 0; i < size; i++) { + if (prod) + gk20a_writel(g, gv11b_blcg_gr[i].addr, + gv11b_blcg_gr[i].prod); + else + gk20a_writel(g, gv11b_blcg_gr[i].addr, + gv11b_blcg_gr[i].disable); + } +} + +void gv11b_blcg_ltc_load_gating_prod(struct gk20a *g, + bool prod) +{ + u32 i; + u32 size = sizeof(gv11b_blcg_ltc) / sizeof(struct gating_desc); + + if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG)) + return; + + for (i = 0; i < size; i++) { + if (prod) + gk20a_writel(g, gv11b_blcg_ltc[i].addr, + gv11b_blcg_ltc[i].prod); + else + gk20a_writel(g, gv11b_blcg_ltc[i].addr, + gv11b_blcg_ltc[i].disable); + } +} + +void gv11b_blcg_pwr_csb_load_gating_prod(struct gk20a *g, + bool prod) +{ + u32 i; + u32 size = sizeof(gv11b_blcg_pwr_csb) / sizeof(struct gating_desc); + + if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG)) + return; + + for (i = 0; i < size; i++) { + if (prod) + gk20a_writel(g, gv11b_blcg_pwr_csb[i].addr, + gv11b_blcg_pwr_csb[i].prod); + else + gk20a_writel(g, gv11b_blcg_pwr_csb[i].addr, + gv11b_blcg_pwr_csb[i].disable); + } +} + +void gv11b_blcg_pmu_load_gating_prod(struct gk20a *g, + bool prod) +{ + u32 i; + u32 size = sizeof(gv11b_blcg_pmu) / sizeof(struct gating_desc); + + if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG)) + return; + + for (i = 0; i < size; i++) { + if (prod) + gk20a_writel(g, gv11b_blcg_pmu[i].addr, + gv11b_blcg_pmu[i].prod); + else + gk20a_writel(g, gv11b_blcg_pmu[i].addr, + gv11b_blcg_pmu[i].disable); + } +} + +void gv11b_blcg_xbar_load_gating_prod(struct gk20a *g, + bool prod) +{ + u32 i; + u32 size = sizeof(gv11b_blcg_xbar) / sizeof(struct gating_desc); + + if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG)) + return; + + for (i = 0; i < size; i++) { + if (prod) + gk20a_writel(g, gv11b_blcg_xbar[i].addr, + gv11b_blcg_xbar[i].prod); + else + gk20a_writel(g, gv11b_blcg_xbar[i].addr, + 
gv11b_blcg_xbar[i].disable); + } +} + +void gr_gv11b_pg_gr_load_gating_prod(struct gk20a *g, + bool prod) +{ + u32 i; + u32 size = sizeof(gv11b_pg_gr) / sizeof(struct gating_desc); + + if (!nvgpu_is_enabled(g, NVGPU_GPU_CAN_BLCG)) + return; + + for (i = 0; i < size; i++) { + if (prod) + gk20a_writel(g, gv11b_pg_gr[i].addr, + gv11b_pg_gr[i].prod); + else + gk20a_writel(g, gv11b_pg_gr[i].addr, + gv11b_pg_gr[i].disable); + } +} + +#endif /* __gv11b_gating_reglist_h__ */ diff --git a/drivers/gpu/nvgpu/gv11b/gv11b_gating_reglist.h b/drivers/gpu/nvgpu/gv11b/gv11b_gating_reglist.h new file mode 100644 index 000000000..233189e04 --- /dev/null +++ b/drivers/gpu/nvgpu/gv11b/gv11b_gating_reglist.h @@ -0,0 +1,99 @@ +/* + * Copyright (c) 2016, NVIDIA Corporation. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "gk20a/gk20a.h" + +void gv11b_slcg_bus_load_gating_prod(struct gk20a *g, + bool prod); + +void gv11b_slcg_ce2_load_gating_prod(struct gk20a *g, + bool prod); + +void gv11b_slcg_chiplet_load_gating_prod(struct gk20a *g, + bool prod); + +void gv11b_slcg_ctxsw_firmware_load_gating_prod(struct gk20a *g, + bool prod); + +void gv11b_slcg_fb_load_gating_prod(struct gk20a *g, + bool prod); + +void gv11b_slcg_fifo_load_gating_prod(struct gk20a *g, + bool prod); + +void gr_gv11b_slcg_gr_load_gating_prod(struct gk20a *g, + bool prod); + +void ltc_gv11b_slcg_ltc_load_gating_prod(struct gk20a *g, + bool prod); + +void gv11b_slcg_perf_load_gating_prod(struct gk20a *g, + bool prod); + +void gv11b_slcg_priring_load_gating_prod(struct gk20a *g, + bool prod); + +void gv11b_slcg_pwr_csb_load_gating_prod(struct gk20a *g, + bool prod); + +void gv11b_slcg_pmu_load_gating_prod(struct gk20a *g, + bool prod); + +void gv11b_slcg_therm_load_gating_prod(struct gk20a *g, + bool prod); + +void gv11b_slcg_xbar_load_gating_prod(struct gk20a *g, + bool prod); + +void gv11b_blcg_bus_load_gating_prod(struct gk20a *g, + bool prod); + +void gv11b_blcg_ce_load_gating_prod(struct gk20a *g, + bool prod); + +void gv11b_blcg_ctxsw_firmware_load_gating_prod(struct gk20a *g, + bool prod); + +void gv11b_blcg_fb_load_gating_prod(struct gk20a *g, + bool prod); + +void gv11b_blcg_fifo_load_gating_prod(struct gk20a *g, + bool prod); + +void gv11b_blcg_gr_load_gating_prod(struct gk20a *g, + bool prod); + +void gv11b_blcg_ltc_load_gating_prod(struct gk20a *g, + bool prod); + +void gv11b_blcg_pwr_csb_load_gating_prod(struct gk20a *g, + bool prod); + +void gv11b_blcg_pmu_load_gating_prod(struct gk20a *g, + bool prod); + +void gv11b_blcg_xbar_load_gating_prod(struct gk20a *g, + bool prod); + +void gr_gv11b_pg_gr_load_gating_prod(struct gk20a *g, + bool prod); + diff --git a/drivers/gpu/nvgpu/gv11b/hal_gv11b.c b/drivers/gpu/nvgpu/gv11b/hal_gv11b.c new file mode 100644 index 000000000..fc059caa2 --- 
/dev/null +++ b/drivers/gpu/nvgpu/gv11b/hal_gv11b.c @@ -0,0 +1,778 @@ +/* + * GV11B Tegra HAL interface + * + * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include +#include + +#include +#include + +#include "gk20a/gk20a.h" +#include "gk20a/fifo_gk20a.h" +#include "gk20a/fecs_trace_gk20a.h" +#include "gk20a/css_gr_gk20a.h" +#include "gk20a/mc_gk20a.h" +#include "gk20a/mm_gk20a.h" +#include "gk20a/dbg_gpu_gk20a.h" +#include "gk20a/bus_gk20a.h" +#include "gk20a/flcn_gk20a.h" +#include "gk20a/regops_gk20a.h" +#include "gk20a/fb_gk20a.h" +#include "gk20a/pmu_gk20a.h" +#include "gk20a/gr_gk20a.h" + +#include "gm20b/ltc_gm20b.h" +#include "gm20b/gr_gm20b.h" +#include "gm20b/fb_gm20b.h" +#include "gm20b/fifo_gm20b.h" +#include "gm20b/mm_gm20b.h" +#include "gm20b/acr_gm20b.h" +#include "gm20b/pmu_gm20b.h" + +#include "gp10b/ltc_gp10b.h" +#include "gp10b/therm_gp10b.h" +#include "gp10b/mc_gp10b.h" +#include "gp10b/ce_gp10b.h" +#include "gp10b/priv_ring_gp10b.h" +#include "gp10b/fifo_gp10b.h" +#include "gp10b/fecs_trace_gp10b.h" +#include "gp10b/fb_gp10b.h" +#include "gp10b/mm_gp10b.h" +#include "gp10b/pmu_gp10b.h" +#include "gp10b/gr_gp10b.h" + +#include "gp106/pmu_gp106.h" +#include "gp106/acr_gp106.h" + +#include "gv100/gr_gv100.h" + +#include "dbg_gpu_gv11b.h" +#include "hal_gv11b.h" +#include "css_gr_gv11b.h" +#include "gr_gv11b.h" +#include "mc_gv11b.h" +#include "ltc_gv11b.h" +#include "gv11b.h" +#include "ce_gv11b.h" +#include "gr_ctx_gv11b.h" +#include "mm_gv11b.h" +#include "pmu_gv11b.h" +#include "acr_gv11b.h" +#include "fb_gv11b.h" +#include "fifo_gv11b.h" +#include "gv11b_gating_reglist.h" +#include "regops_gv11b.h" +#include "subctx_gv11b.h" +#include "therm_gv11b.h" + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +int gv11b_get_litter_value(struct gk20a *g, int value) +{ + int ret = EINVAL; + switch (value) { + case GPU_LIT_NUM_GPCS: + ret = proj_scal_litter_num_gpcs_v(); + break; + case GPU_LIT_NUM_PES_PER_GPC: + ret = proj_scal_litter_num_pes_per_gpc_v(); + break; + case GPU_LIT_NUM_ZCULL_BANKS: + ret = proj_scal_litter_num_zcull_banks_v(); + 
break; + case GPU_LIT_NUM_TPC_PER_GPC: + ret = proj_scal_litter_num_tpc_per_gpc_v(); + break; + case GPU_LIT_NUM_SM_PER_TPC: + ret = proj_scal_litter_num_sm_per_tpc_v(); + break; + case GPU_LIT_NUM_FBPS: + ret = proj_scal_litter_num_fbps_v(); + break; + case GPU_LIT_GPC_BASE: + ret = proj_gpc_base_v(); + break; + case GPU_LIT_GPC_STRIDE: + ret = proj_gpc_stride_v(); + break; + case GPU_LIT_GPC_SHARED_BASE: + ret = proj_gpc_shared_base_v(); + break; + case GPU_LIT_TPC_IN_GPC_BASE: + ret = proj_tpc_in_gpc_base_v(); + break; + case GPU_LIT_TPC_IN_GPC_STRIDE: + ret = proj_tpc_in_gpc_stride_v(); + break; + case GPU_LIT_TPC_IN_GPC_SHARED_BASE: + ret = proj_tpc_in_gpc_shared_base_v(); + break; + case GPU_LIT_PPC_IN_GPC_BASE: + ret = proj_ppc_in_gpc_base_v(); + break; + case GPU_LIT_PPC_IN_GPC_SHARED_BASE: + ret = proj_ppc_in_gpc_shared_base_v(); + break; + case GPU_LIT_PPC_IN_GPC_STRIDE: + ret = proj_ppc_in_gpc_stride_v(); + break; + case GPU_LIT_ROP_BASE: + ret = proj_rop_base_v(); + break; + case GPU_LIT_ROP_STRIDE: + ret = proj_rop_stride_v(); + break; + case GPU_LIT_ROP_SHARED_BASE: + ret = proj_rop_shared_base_v(); + break; + case GPU_LIT_HOST_NUM_ENGINES: + ret = proj_host_num_engines_v(); + break; + case GPU_LIT_HOST_NUM_PBDMA: + ret = proj_host_num_pbdma_v(); + break; + case GPU_LIT_LTC_STRIDE: + ret = proj_ltc_stride_v(); + break; + case GPU_LIT_LTS_STRIDE: + ret = proj_lts_stride_v(); + break; + case GPU_LIT_SM_PRI_STRIDE: + ret = proj_sm_stride_v(); + break; + case GPU_LIT_SMPC_PRI_BASE: + ret = proj_smpc_base_v(); + break; + case GPU_LIT_SMPC_PRI_SHARED_BASE: + ret = proj_smpc_shared_base_v(); + break; + case GPU_LIT_SMPC_PRI_UNIQUE_BASE: + ret = proj_smpc_unique_base_v(); + break; + case GPU_LIT_SMPC_PRI_STRIDE: + ret = proj_smpc_stride_v(); + break; + /* Even though GV11B doesn't have an FBPA unit, the HW reports one, + * and the microcode as a result leaves space in the context buffer + * for one, so make sure SW accounts for this also. 
+ */ + case GPU_LIT_NUM_FBPAS: + ret = proj_scal_litter_num_fbpas_v(); + break; + /* Hardcode FBPA values other than NUM_FBPAS to 0. */ + case GPU_LIT_FBPA_STRIDE: + case GPU_LIT_FBPA_BASE: + case GPU_LIT_FBPA_SHARED_BASE: + ret = 0; + break; + case GPU_LIT_TWOD_CLASS: + ret = FERMI_TWOD_A; + break; + case GPU_LIT_THREED_CLASS: + ret = VOLTA_A; + break; + case GPU_LIT_COMPUTE_CLASS: + ret = VOLTA_COMPUTE_A; + break; + case GPU_LIT_GPFIFO_CLASS: + ret = VOLTA_CHANNEL_GPFIFO_A; + break; + case GPU_LIT_I2M_CLASS: + ret = KEPLER_INLINE_TO_MEMORY_B; + break; + case GPU_LIT_DMA_COPY_CLASS: + ret = VOLTA_DMA_COPY_A; + break; + + default: + nvgpu_err(g, "Missing definition %d", value); + BUG(); + break; + } + + return ret; +} + +static const struct gpu_ops gv11b_ops = { + .ltc = { + .determine_L2_size_bytes = gp10b_determine_L2_size_bytes, + .set_zbc_s_entry = gv11b_ltc_set_zbc_stencil_entry, + .set_zbc_color_entry = gm20b_ltc_set_zbc_color_entry, + .set_zbc_depth_entry = gm20b_ltc_set_zbc_depth_entry, + .init_cbc = NULL, + .init_fs_state = gv11b_ltc_init_fs_state, + .init_comptags = gp10b_ltc_init_comptags, + .cbc_ctrl = gm20b_ltc_cbc_ctrl, + .isr = gv11b_ltc_isr, + .cbc_fix_config = gv11b_ltc_cbc_fix_config, + .flush = gm20b_flush_ltc, + .set_enabled = gp10b_ltc_set_enabled, + }, + .ce2 = { + .isr_stall = gv11b_ce_isr, + .isr_nonstall = gp10b_ce_nonstall_isr, + .get_num_pce = gv11b_ce_get_num_pce, + }, + .gr = { + .get_patch_slots = gr_gv100_get_patch_slots, + .init_gpc_mmu = gr_gv11b_init_gpc_mmu, + .bundle_cb_defaults = gr_gv11b_bundle_cb_defaults, + .cb_size_default = gr_gv11b_cb_size_default, + .calc_global_ctx_buffer_size = + gr_gv11b_calc_global_ctx_buffer_size, + .commit_global_attrib_cb = gr_gv11b_commit_global_attrib_cb, + .commit_global_bundle_cb = gr_gp10b_commit_global_bundle_cb, + .commit_global_cb_manager = gr_gp10b_commit_global_cb_manager, + .commit_global_pagepool = gr_gp10b_commit_global_pagepool, + .handle_sw_method = gr_gv11b_handle_sw_method, + 
.set_alpha_circular_buffer_size = + gr_gv11b_set_alpha_circular_buffer_size, + .set_circular_buffer_size = gr_gv11b_set_circular_buffer_size, + .enable_hww_exceptions = gr_gv11b_enable_hww_exceptions, + .is_valid_class = gr_gv11b_is_valid_class, + .is_valid_gfx_class = gr_gv11b_is_valid_gfx_class, + .is_valid_compute_class = gr_gv11b_is_valid_compute_class, + .get_sm_dsm_perf_regs = gv11b_gr_get_sm_dsm_perf_regs, + .get_sm_dsm_perf_ctrl_regs = gv11b_gr_get_sm_dsm_perf_ctrl_regs, + .init_fs_state = gr_gv11b_init_fs_state, + .set_hww_esr_report_mask = gv11b_gr_set_hww_esr_report_mask, + .falcon_load_ucode = gr_gm20b_load_ctxsw_ucode_segments, + .load_ctxsw_ucode = gr_gk20a_load_ctxsw_ucode, + .set_gpc_tpc_mask = gr_gv11b_set_gpc_tpc_mask, + .get_gpc_tpc_mask = gr_gm20b_get_gpc_tpc_mask, + .free_channel_ctx = gk20a_free_channel_ctx, + .alloc_obj_ctx = gk20a_alloc_obj_ctx, + .bind_ctxsw_zcull = gr_gk20a_bind_ctxsw_zcull, + .get_zcull_info = gr_gk20a_get_zcull_info, + .is_tpc_addr = gr_gm20b_is_tpc_addr, + .get_tpc_num = gr_gm20b_get_tpc_num, + .detect_sm_arch = gr_gv11b_detect_sm_arch, + .add_zbc_color = gr_gp10b_add_zbc_color, + .add_zbc_depth = gr_gp10b_add_zbc_depth, + .zbc_set_table = gk20a_gr_zbc_set_table, + .zbc_query_table = gr_gk20a_query_zbc, + .pmu_save_zbc = gk20a_pmu_save_zbc, + .add_zbc = gr_gk20a_add_zbc, + .pagepool_default_size = gr_gv11b_pagepool_default_size, + .init_ctx_state = gr_gp10b_init_ctx_state, + .alloc_gr_ctx = gr_gp10b_alloc_gr_ctx, + .free_gr_ctx = gr_gp10b_free_gr_ctx, + .update_ctxsw_preemption_mode = + gr_gp10b_update_ctxsw_preemption_mode, + .dump_gr_regs = gr_gv11b_dump_gr_status_regs, + .update_pc_sampling = gr_gm20b_update_pc_sampling, + .get_fbp_en_mask = gr_gm20b_get_fbp_en_mask, + .get_max_ltc_per_fbp = gr_gm20b_get_max_ltc_per_fbp, + .get_max_lts_per_ltc = gr_gm20b_get_max_lts_per_ltc, + .get_rop_l2_en_mask = gr_gm20b_rop_l2_en_mask, + .get_max_fbps_count = gr_gm20b_get_max_fbps_count, + .init_sm_dsm_reg_info = 
gv11b_gr_init_sm_dsm_reg_info, + .wait_empty = gr_gv11b_wait_empty, + .init_cyclestats = gr_gm20b_init_cyclestats, + .set_sm_debug_mode = gv11b_gr_set_sm_debug_mode, + .enable_cde_in_fecs = gr_gm20b_enable_cde_in_fecs, + .bpt_reg_info = gv11b_gr_bpt_reg_info, + .get_access_map = gr_gv11b_get_access_map, + .handle_fecs_error = gr_gv11b_handle_fecs_error, + .handle_sm_exception = gr_gk20a_handle_sm_exception, + .handle_tex_exception = gr_gv11b_handle_tex_exception, + .enable_gpc_exceptions = gr_gv11b_enable_gpc_exceptions, + .enable_exceptions = gr_gv11b_enable_exceptions, + .get_lrf_tex_ltc_dram_override = get_ecc_override_val, + .update_smpc_ctxsw_mode = gr_gk20a_update_smpc_ctxsw_mode, + .update_hwpm_ctxsw_mode = gr_gk20a_update_hwpm_ctxsw_mode, + .record_sm_error_state = gv11b_gr_record_sm_error_state, + .update_sm_error_state = gv11b_gr_update_sm_error_state, + .clear_sm_error_state = gm20b_gr_clear_sm_error_state, + .suspend_contexts = gr_gp10b_suspend_contexts, + .resume_contexts = gr_gk20a_resume_contexts, + .get_preemption_mode_flags = gr_gp10b_get_preemption_mode_flags, + .init_sm_id_table = gr_gv100_init_sm_id_table, + .load_smid_config = gr_gv11b_load_smid_config, + .program_sm_id_numbering = gr_gv11b_program_sm_id_numbering, + .is_ltcs_ltss_addr = gr_gm20b_is_ltcs_ltss_addr, + .is_ltcn_ltss_addr = gr_gm20b_is_ltcn_ltss_addr, + .split_lts_broadcast_addr = gr_gm20b_split_lts_broadcast_addr, + .split_ltc_broadcast_addr = gr_gm20b_split_ltc_broadcast_addr, + .setup_rop_mapping = gr_gv11b_setup_rop_mapping, + .program_zcull_mapping = gr_gv11b_program_zcull_mapping, + .commit_global_timeslice = gr_gv11b_commit_global_timeslice, + .commit_inst = gr_gv11b_commit_inst, + .write_zcull_ptr = gr_gv11b_write_zcull_ptr, + .write_pm_ptr = gr_gv11b_write_pm_ptr, + .init_elcg_mode = gr_gv11b_init_elcg_mode, + .load_tpc_mask = gr_gv11b_load_tpc_mask, + .inval_icache = gr_gk20a_inval_icache, + .trigger_suspend = gv11b_gr_sm_trigger_suspend, + .wait_for_pause = 
gr_gk20a_wait_for_pause, + .resume_from_pause = gv11b_gr_resume_from_pause, + .clear_sm_errors = gr_gk20a_clear_sm_errors, + .tpc_enabled_exceptions = gr_gk20a_tpc_enabled_exceptions, + .get_esr_sm_sel = gv11b_gr_get_esr_sm_sel, + .sm_debugger_attached = gv11b_gr_sm_debugger_attached, + .suspend_single_sm = gv11b_gr_suspend_single_sm, + .suspend_all_sms = gv11b_gr_suspend_all_sms, + .resume_single_sm = gv11b_gr_resume_single_sm, + .resume_all_sms = gv11b_gr_resume_all_sms, + .get_sm_hww_warp_esr = gv11b_gr_get_sm_hww_warp_esr, + .get_sm_hww_global_esr = gv11b_gr_get_sm_hww_global_esr, + .get_sm_no_lock_down_hww_global_esr_mask = + gv11b_gr_get_sm_no_lock_down_hww_global_esr_mask, + .lock_down_sm = gv11b_gr_lock_down_sm, + .wait_for_sm_lock_down = gv11b_gr_wait_for_sm_lock_down, + .clear_sm_hww = gv11b_gr_clear_sm_hww, + .init_ovr_sm_dsm_perf = gv11b_gr_init_ovr_sm_dsm_perf, + .get_ovr_perf_regs = gv11b_gr_get_ovr_perf_regs, + .disable_rd_coalesce = gm20a_gr_disable_rd_coalesce, + .set_boosted_ctx = gr_gp10b_set_boosted_ctx, + .set_preemption_mode = gr_gp10b_set_preemption_mode, + .set_czf_bypass = NULL, + .pre_process_sm_exception = gr_gv11b_pre_process_sm_exception, + .set_preemption_buffer_va = gr_gv11b_set_preemption_buffer_va, + .init_preemption_state = NULL, + .update_boosted_ctx = gr_gp10b_update_boosted_ctx, + .set_bes_crop_debug3 = gr_gp10b_set_bes_crop_debug3, + .create_gr_sysfs = gr_gv11b_create_sysfs, + .set_ctxsw_preemption_mode = gr_gp10b_set_ctxsw_preemption_mode, + .is_etpc_addr = gv11b_gr_pri_is_etpc_addr, + .egpc_etpc_priv_addr_table = gv11b_gr_egpc_etpc_priv_addr_table, + .handle_tpc_mpc_exception = gr_gv11b_handle_tpc_mpc_exception, + .zbc_s_query_table = gr_gv11b_zbc_s_query_table, + .load_zbc_s_default_tbl = gr_gv11b_load_stencil_default_tbl, + .handle_gpc_gpcmmu_exception = + gr_gv11b_handle_gpc_gpcmmu_exception, + .add_zbc_type_s = gr_gv11b_add_zbc_type_s, + .get_egpc_base = gv11b_gr_get_egpc_base, + .get_egpc_etpc_num = 
gv11b_gr_get_egpc_etpc_num, + .handle_gpc_gpccs_exception = + gr_gv11b_handle_gpc_gpccs_exception, + .load_zbc_s_tbl = gr_gv11b_load_stencil_tbl, + .access_smpc_reg = gv11b_gr_access_smpc_reg, + .is_egpc_addr = gv11b_gr_pri_is_egpc_addr, + .add_zbc_s = gr_gv11b_add_zbc_stencil, + .handle_gcc_exception = gr_gv11b_handle_gcc_exception, + .init_sw_veid_bundle = gr_gv11b_init_sw_veid_bundle, + .handle_tpc_sm_ecc_exception = + gr_gv11b_handle_tpc_sm_ecc_exception, + .decode_egpc_addr = gv11b_gr_decode_egpc_addr, + .init_ctxsw_hdr_data = gr_gp10b_init_ctxsw_hdr_data, + }, + .fb = { + .reset = gv11b_fb_reset, + .init_hw = gk20a_fb_init_hw, + .init_fs_state = gv11b_fb_init_fs_state, + .init_cbc = gv11b_fb_init_cbc, + .set_mmu_page_size = gm20b_fb_set_mmu_page_size, + .set_use_full_comp_tag_line = + gm20b_fb_set_use_full_comp_tag_line, + .compression_page_size = gp10b_fb_compression_page_size, + .compressible_page_size = gp10b_fb_compressible_page_size, + .vpr_info_fetch = gm20b_fb_vpr_info_fetch, + .dump_vpr_wpr_info = gm20b_fb_dump_vpr_wpr_info, + .read_wpr_info = gm20b_fb_read_wpr_info, + .is_debug_mode_enabled = gm20b_fb_debug_mode_enabled, + .set_debug_mode = gm20b_fb_set_debug_mode, + .tlb_invalidate = gk20a_fb_tlb_invalidate, + .hub_isr = gv11b_fb_hub_isr, + .mem_unlock = NULL, + }, + .clock_gating = { + .slcg_bus_load_gating_prod = + gv11b_slcg_bus_load_gating_prod, + .slcg_ce2_load_gating_prod = + gv11b_slcg_ce2_load_gating_prod, + .slcg_chiplet_load_gating_prod = + gv11b_slcg_chiplet_load_gating_prod, + .slcg_ctxsw_firmware_load_gating_prod = + gv11b_slcg_ctxsw_firmware_load_gating_prod, + .slcg_fb_load_gating_prod = + gv11b_slcg_fb_load_gating_prod, + .slcg_fifo_load_gating_prod = + gv11b_slcg_fifo_load_gating_prod, + .slcg_gr_load_gating_prod = + gr_gv11b_slcg_gr_load_gating_prod, + .slcg_ltc_load_gating_prod = + ltc_gv11b_slcg_ltc_load_gating_prod, + .slcg_perf_load_gating_prod = + gv11b_slcg_perf_load_gating_prod, + .slcg_priring_load_gating_prod = + 
gv11b_slcg_priring_load_gating_prod, + .slcg_pmu_load_gating_prod = + gv11b_slcg_pmu_load_gating_prod, + .slcg_therm_load_gating_prod = + gv11b_slcg_therm_load_gating_prod, + .slcg_xbar_load_gating_prod = + gv11b_slcg_xbar_load_gating_prod, + .blcg_bus_load_gating_prod = + gv11b_blcg_bus_load_gating_prod, + .blcg_ce_load_gating_prod = + gv11b_blcg_ce_load_gating_prod, + .blcg_ctxsw_firmware_load_gating_prod = + gv11b_blcg_ctxsw_firmware_load_gating_prod, + .blcg_fb_load_gating_prod = + gv11b_blcg_fb_load_gating_prod, + .blcg_fifo_load_gating_prod = + gv11b_blcg_fifo_load_gating_prod, + .blcg_gr_load_gating_prod = + gv11b_blcg_gr_load_gating_prod, + .blcg_ltc_load_gating_prod = + gv11b_blcg_ltc_load_gating_prod, + .blcg_pwr_csb_load_gating_prod = + gv11b_blcg_pwr_csb_load_gating_prod, + .blcg_pmu_load_gating_prod = + gv11b_blcg_pmu_load_gating_prod, + .blcg_xbar_load_gating_prod = + gv11b_blcg_xbar_load_gating_prod, + .pg_gr_load_gating_prod = + gr_gv11b_pg_gr_load_gating_prod, + }, + .fifo = { + .get_preempt_timeout = gv11b_fifo_get_preempt_timeout, + .init_fifo_setup_hw = gv11b_init_fifo_setup_hw, + .bind_channel = channel_gm20b_bind, + .unbind_channel = channel_gv11b_unbind, + .disable_channel = gk20a_fifo_disable_channel, + .enable_channel = gk20a_fifo_enable_channel, + .alloc_inst = gk20a_fifo_alloc_inst, + .free_inst = gk20a_fifo_free_inst, + .setup_ramfc = channel_gv11b_setup_ramfc, + .channel_set_timeslice = gk20a_fifo_set_timeslice, + .default_timeslice_us = gk20a_fifo_default_timeslice_us, + .setup_userd = gk20a_fifo_setup_userd, + .userd_gp_get = gv11b_userd_gp_get, + .userd_gp_put = gv11b_userd_gp_put, + .userd_pb_get = gv11b_userd_pb_get, + .pbdma_acquire_val = gk20a_fifo_pbdma_acquire_val, + .preempt_channel = gv11b_fifo_preempt_channel, + .preempt_tsg = gv11b_fifo_preempt_tsg, + .enable_tsg = gv11b_fifo_enable_tsg, + .disable_tsg = gk20a_disable_tsg, + .tsg_verify_channel_status = gk20a_fifo_tsg_unbind_channel_verify_status, + 
.tsg_verify_status_ctx_reload = gm20b_fifo_tsg_verify_status_ctx_reload, + .tsg_verify_status_faulted = gv11b_fifo_tsg_verify_status_faulted, + .update_runlist = gk20a_fifo_update_runlist, + .trigger_mmu_fault = NULL, + .get_mmu_fault_info = NULL, + .wait_engine_idle = gk20a_fifo_wait_engine_idle, + .get_num_fifos = gv11b_fifo_get_num_fifos, + .get_pbdma_signature = gp10b_fifo_get_pbdma_signature, + .set_runlist_interleave = gk20a_fifo_set_runlist_interleave, + .tsg_set_timeslice = gk20a_fifo_tsg_set_timeslice, + .force_reset_ch = gk20a_fifo_force_reset_ch, + .engine_enum_from_type = gp10b_fifo_engine_enum_from_type, + .device_info_data_parse = gp10b_device_info_data_parse, + .eng_runlist_base_size = fifo_eng_runlist_base__size_1_v, + .init_engine_info = gk20a_fifo_init_engine_info, + .runlist_entry_size = ram_rl_entry_size_v, + .get_tsg_runlist_entry = gv11b_get_tsg_runlist_entry, + .get_ch_runlist_entry = gv11b_get_ch_runlist_entry, + .is_fault_engine_subid_gpc = gv11b_is_fault_engine_subid_gpc, + .dump_pbdma_status = gk20a_dump_pbdma_status, + .dump_eng_status = gv11b_dump_eng_status, + .dump_channel_status_ramfc = gv11b_dump_channel_status_ramfc, + .intr_0_error_mask = gv11b_fifo_intr_0_error_mask, + .is_preempt_pending = gv11b_fifo_is_preempt_pending, + .init_pbdma_intr_descs = gv11b_fifo_init_pbdma_intr_descs, + .reset_enable_hw = gv11b_init_fifo_reset_enable_hw, + .teardown_ch_tsg = gv11b_fifo_teardown_ch_tsg, + .handle_sched_error = gv11b_fifo_handle_sched_error, + .handle_pbdma_intr_0 = gv11b_fifo_handle_pbdma_intr_0, + .handle_pbdma_intr_1 = gv11b_fifo_handle_pbdma_intr_1, + .init_eng_method_buffers = gv11b_fifo_init_eng_method_buffers, + .deinit_eng_method_buffers = + gv11b_fifo_deinit_eng_method_buffers, + .tsg_bind_channel = gk20a_tsg_bind_channel, + .tsg_unbind_channel = gk20a_tsg_unbind_channel, +#ifdef CONFIG_TEGRA_GK20A_NVHOST + .alloc_syncpt_buf = gv11b_fifo_alloc_syncpt_buf, + .free_syncpt_buf = gv11b_fifo_free_syncpt_buf, + .add_syncpt_wait_cmd 
= gv11b_fifo_add_syncpt_wait_cmd, + .get_syncpt_wait_cmd_size = gv11b_fifo_get_syncpt_wait_cmd_size, + .add_syncpt_incr_cmd = gv11b_fifo_add_syncpt_incr_cmd, + .get_syncpt_incr_cmd_size = gv11b_fifo_get_syncpt_incr_cmd_size, +#endif + .resetup_ramfc = NULL, + .device_info_fault_id = top_device_info_data_fault_id_enum_v, + .free_channel_ctx_header = gv11b_free_subctx_header, + .preempt_ch_tsg = gv11b_fifo_preempt_ch_tsg, + .handle_ctxsw_timeout = gv11b_fifo_handle_ctxsw_timeout, + }, + .gr_ctx = { + .get_netlist_name = gr_gv11b_get_netlist_name, + .is_fw_defined = gr_gv11b_is_firmware_defined, + }, +#ifdef CONFIG_GK20A_CTXSW_TRACE + .fecs_trace = { + .alloc_user_buffer = NULL, + .free_user_buffer = NULL, + .mmap_user_buffer = NULL, + .init = NULL, + .deinit = NULL, + .enable = NULL, + .disable = NULL, + .is_enabled = NULL, + .reset = NULL, + .flush = NULL, + .poll = NULL, + .bind_channel = NULL, + .unbind_channel = NULL, + .max_entries = NULL, + }, +#endif /* CONFIG_GK20A_CTXSW_TRACE */ + .mm = { + .support_sparse = gm20b_mm_support_sparse, + .gmmu_map = gk20a_locked_gmmu_map, + .gmmu_unmap = gk20a_locked_gmmu_unmap, + .vm_bind_channel = gk20a_vm_bind_channel, + .fb_flush = gk20a_mm_fb_flush, + .l2_invalidate = gk20a_mm_l2_invalidate, + .l2_flush = gv11b_mm_l2_flush, + .cbc_clean = gk20a_mm_cbc_clean, + .set_big_page_size = gm20b_mm_set_big_page_size, + .get_big_page_sizes = gm20b_mm_get_big_page_sizes, + .get_default_big_page_size = gp10b_mm_get_default_big_page_size, + .gpu_phys_addr = gv11b_gpu_phys_addr, + .get_iommu_bit = gp10b_mm_get_iommu_bit, + .get_mmu_levels = gp10b_mm_get_mmu_levels, + .init_pdb = gp10b_mm_init_pdb, + .init_mm_setup_hw = gv11b_init_mm_setup_hw, + .is_bar1_supported = gv11b_mm_is_bar1_supported, + .alloc_inst_block = gk20a_alloc_inst_block, + .init_inst_block = gv11b_init_inst_block, + .mmu_fault_pending = gv11b_mm_mmu_fault_pending, + .get_kind_invalid = gm20b_get_kind_invalid, + .get_kind_pitch = gm20b_get_kind_pitch, + .init_bar2_vm = 
gb10b_init_bar2_vm, + .init_bar2_mm_hw_setup = gv11b_init_bar2_mm_hw_setup, + .remove_bar2_vm = gv11b_mm_remove_bar2_vm, + .fault_info_mem_destroy = gv11b_mm_fault_info_mem_destroy, + }, + .therm = { + .init_therm_setup_hw = gp10b_init_therm_setup_hw, + .elcg_init_idle_filters = gv11b_elcg_init_idle_filters, + }, + .pmu = { + .pmu_setup_elpg = gp10b_pmu_setup_elpg, + .pmu_get_queue_head = pwr_pmu_queue_head_r, + .pmu_get_queue_head_size = pwr_pmu_queue_head__size_1_v, + .pmu_get_queue_tail = pwr_pmu_queue_tail_r, + .pmu_get_queue_tail_size = pwr_pmu_queue_tail__size_1_v, + .pmu_queue_head = gk20a_pmu_queue_head, + .pmu_queue_tail = gk20a_pmu_queue_tail, + .pmu_msgq_tail = gk20a_pmu_msgq_tail, + .pmu_mutex_size = pwr_pmu_mutex__size_1_v, + .pmu_mutex_acquire = gk20a_pmu_mutex_acquire, + .pmu_mutex_release = gk20a_pmu_mutex_release, + .write_dmatrfbase = gp10b_write_dmatrfbase, + .pmu_elpg_statistics = gp106_pmu_elpg_statistics, + .pmu_pg_init_param = gv11b_pg_gr_init, + .pmu_pg_supported_engines_list = gk20a_pmu_pg_engines_list, + .pmu_pg_engines_feature_list = gk20a_pmu_pg_feature_list, + .dump_secure_fuses = pmu_dump_security_fuses_gp10b, + .reset_engine = gp106_pmu_engine_reset, + .is_engine_in_reset = gp106_pmu_is_engine_in_reset, + .pmu_nsbootstrap = gv11b_pmu_bootstrap, + .pmu_pg_set_sub_feature_mask = gv11b_pg_set_subfeature_mask, + .is_pmu_supported = gv11b_is_pmu_supported, + }, + .regops = { + .get_global_whitelist_ranges = + gv11b_get_global_whitelist_ranges, + .get_global_whitelist_ranges_count = + gv11b_get_global_whitelist_ranges_count, + .get_context_whitelist_ranges = + gv11b_get_context_whitelist_ranges, + .get_context_whitelist_ranges_count = + gv11b_get_context_whitelist_ranges_count, + .get_runcontrol_whitelist = gv11b_get_runcontrol_whitelist, + .get_runcontrol_whitelist_count = + gv11b_get_runcontrol_whitelist_count, + .get_runcontrol_whitelist_ranges = + gv11b_get_runcontrol_whitelist_ranges, + .get_runcontrol_whitelist_ranges_count = + 
gv11b_get_runcontrol_whitelist_ranges_count, + .get_qctl_whitelist = gv11b_get_qctl_whitelist, + .get_qctl_whitelist_count = gv11b_get_qctl_whitelist_count, + .get_qctl_whitelist_ranges = gv11b_get_qctl_whitelist_ranges, + .get_qctl_whitelist_ranges_count = + gv11b_get_qctl_whitelist_ranges_count, + .apply_smpc_war = gv11b_apply_smpc_war, + }, + .mc = { + .intr_enable = mc_gv11b_intr_enable, + .intr_unit_config = mc_gp10b_intr_unit_config, + .isr_stall = mc_gp10b_isr_stall, + .intr_stall = mc_gp10b_intr_stall, + .intr_stall_pause = mc_gp10b_intr_stall_pause, + .intr_stall_resume = mc_gp10b_intr_stall_resume, + .intr_nonstall = mc_gp10b_intr_nonstall, + .intr_nonstall_pause = mc_gp10b_intr_nonstall_pause, + .intr_nonstall_resume = mc_gp10b_intr_nonstall_resume, + .enable = gk20a_mc_enable, + .disable = gk20a_mc_disable, + .reset = gk20a_mc_reset, + .boot_0 = gk20a_mc_boot_0, + .is_intr1_pending = mc_gp10b_is_intr1_pending, + .is_intr_hub_pending = gv11b_mc_is_intr_hub_pending, + }, + .debug = { + .show_dump = gk20a_debug_show_dump, + }, + .dbg_session_ops = { + .exec_reg_ops = exec_regops_gk20a, + .dbg_set_powergate = dbg_set_powergate, + .check_and_set_global_reservation = + nvgpu_check_and_set_global_reservation, + .check_and_set_context_reservation = + nvgpu_check_and_set_context_reservation, + .release_profiler_reservation = + nvgpu_release_profiler_reservation, + .perfbuffer_enable = gv11b_perfbuf_enable_locked, + .perfbuffer_disable = gv11b_perfbuf_disable_locked, + }, + .bus = { + .init_hw = gk20a_bus_init_hw, + .isr = gk20a_bus_isr, + .read_ptimer = gk20a_read_ptimer, + .get_timestamps_zipper = nvgpu_get_timestamps_zipper, + .bar1_bind = NULL, + }, +#if defined(CONFIG_GK20A_CYCLE_STATS) + .css = { + .enable_snapshot = gv11b_css_hw_enable_snapshot, + .disable_snapshot = gv11b_css_hw_disable_snapshot, + .check_data_available = gv11b_css_hw_check_data_available, + .set_handled_snapshots = css_hw_set_handled_snapshots, + .allocate_perfmon_ids = 
css_gr_allocate_perfmon_ids, + .release_perfmon_ids = css_gr_release_perfmon_ids, + }, +#endif + .falcon = { + .falcon_hal_sw_init = gk20a_falcon_hal_sw_init, + }, + .priv_ring = { + .isr = gp10b_priv_ring_isr, + }, + .chip_init_gpu_characteristics = gv11b_init_gpu_characteristics, + .get_litter_value = gv11b_get_litter_value, +}; + +int gv11b_init_hal(struct gk20a *g) +{ + struct gpu_ops *gops = &g->ops; + u32 val; + bool priv_security; + + gops->ltc = gv11b_ops.ltc; + gops->ce2 = gv11b_ops.ce2; + gops->gr = gv11b_ops.gr; + gops->fb = gv11b_ops.fb; + gops->clock_gating = gv11b_ops.clock_gating; + gops->fifo = gv11b_ops.fifo; + gops->gr_ctx = gv11b_ops.gr_ctx; + gops->mm = gv11b_ops.mm; +#ifdef CONFIG_GK20A_CTXSW_TRACE + gops->fecs_trace = gv11b_ops.fecs_trace; +#endif + gops->therm = gv11b_ops.therm; + gops->pmu = gv11b_ops.pmu; + gops->regops = gv11b_ops.regops; + gops->mc = gv11b_ops.mc; + gops->debug = gv11b_ops.debug; + gops->dbg_session_ops = gv11b_ops.dbg_session_ops; + gops->bus = gv11b_ops.bus; +#if defined(CONFIG_GK20A_CYCLE_STATS) + gops->css = gv11b_ops.css; +#endif + gops->falcon = gv11b_ops.falcon; + gops->priv_ring = gv11b_ops.priv_ring; + + /* Lone functions */ + gops->chip_init_gpu_characteristics = + gv11b_ops.chip_init_gpu_characteristics; + gops->get_litter_value = gv11b_ops.get_litter_value; + + val = gk20a_readl(g, fuse_opt_priv_sec_en_r()); + if (val) { + priv_security = true; + pr_err("priv security is enabled\n"); + } else { + priv_security = false; + pr_err("priv security is disabled\n"); + } + __nvgpu_set_enabled(g, NVGPU_GR_USE_DMA_FOR_FW_BOOTSTRAP, false); + __nvgpu_set_enabled(g, NVGPU_SEC_PRIVSECURITY, priv_security); + __nvgpu_set_enabled(g, NVGPU_SEC_SECUREGPCCS, priv_security); + + /* priv security dependent ops */ + if (nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY)) { + /* Add in ops from gm20b acr */ + gops->pmu.prepare_ucode = gp106_prepare_ucode_blob, + gops->pmu.pmu_setup_hw_and_bootstrap = gv11b_bootstrap_hs_flcn, + 
gops->pmu.get_wpr = gm20b_wpr_info, + gops->pmu.alloc_blob_space = gm20b_alloc_blob_space, + gops->pmu.pmu_populate_loader_cfg = + gp106_pmu_populate_loader_cfg, + gops->pmu.flcn_populate_bl_dmem_desc = + gp106_flcn_populate_bl_dmem_desc, + gops->pmu.falcon_wait_for_halt = pmu_wait_for_halt, + gops->pmu.falcon_clear_halt_interrupt_status = + clear_halt_interrupt_status, + gops->pmu.init_falcon_setup_hw = gv11b_init_pmu_setup_hw1, + + gops->pmu.init_wpr_region = gm20b_pmu_init_acr; + gops->pmu.load_lsfalcon_ucode = gp10b_load_falcon_ucode; + gops->pmu.is_lazy_bootstrap = gv11b_is_lazy_bootstrap, + gops->pmu.is_priv_load = gv11b_is_priv_load, + + gops->gr.load_ctxsw_ucode = gr_gm20b_load_ctxsw_ucode; + } else { + /* Inherit from gk20a */ + gops->pmu.prepare_ucode = nvgpu_pmu_prepare_ns_ucode_blob, + gops->pmu.pmu_setup_hw_and_bootstrap = gk20a_init_pmu_setup_hw1, + + gops->pmu.load_lsfalcon_ucode = NULL; + gops->pmu.init_wpr_region = NULL; + gops->pmu.pmu_setup_hw_and_bootstrap = gp10b_init_pmu_setup_hw1; + + gops->gr.load_ctxsw_ucode = gr_gk20a_load_ctxsw_ucode; + } + + __nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, false); + g->bootstrap_owner = LSF_BOOTSTRAP_OWNER_DEFAULT; + + g->name = "gv11b"; + + return 0; +} diff --git a/drivers/gpu/nvgpu/gv11b/hal_gv11b.h b/drivers/gpu/nvgpu/gv11b/hal_gv11b.h new file mode 100644 index 000000000..668353dc1 --- /dev/null +++ b/drivers/gpu/nvgpu/gv11b/hal_gv11b.h @@ -0,0 +1,31 @@ +/* + * GV11B Tegra HAL interface + * + * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef _NVGPU_HAL_GV11B_H +#define _NVGPU_HAL_GV11B_H +struct gk20a; + +int gv11b_init_hal(struct gk20a *gops); +int gv11b_get_litter_value(struct gk20a *g, int value); +#endif diff --git a/drivers/gpu/nvgpu/gv11b/ltc_gv11b.c b/drivers/gpu/nvgpu/gv11b/ltc_gv11b.c new file mode 100644 index 000000000..a199e024b --- /dev/null +++ b/drivers/gpu/nvgpu/gv11b/ltc_gv11b.c @@ -0,0 +1,205 @@ +/* + * GV11B LTC + * + * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "gk20a/gk20a.h" +#include "gp10b/ltc_gp10b.h" + +#include "ltc_gv11b.h" + +#include +#include +#include +#include +#include + +/* + * Sets the ZBC stencil for the passed index. 
+ */ +void gv11b_ltc_set_zbc_stencil_entry(struct gk20a *g, + struct zbc_entry *stencil_val, + u32 index) +{ + u32 real_index = index + GK20A_STARTOF_ZBC_TABLE; + + gk20a_writel(g, ltc_ltcs_ltss_dstg_zbc_index_r(), + ltc_ltcs_ltss_dstg_zbc_index_address_f(real_index)); + + gk20a_writel(g, ltc_ltcs_ltss_dstg_zbc_stencil_clear_value_r(), + stencil_val->depth); + + gk20a_readl(g, ltc_ltcs_ltss_dstg_zbc_index_r()); +} + +void gv11b_ltc_init_fs_state(struct gk20a *g) +{ + u32 ltc_intr; + u32 reg; + + gk20a_dbg_info("initialize gv11b l2"); + + g->ops.mc.reset(g, mc_enable_pfb_enabled_f() | + mc_enable_l2_enabled_f()); + + reg = gk20a_readl(g, mc_elpg_enable_r()); + reg |= mc_elpg_enable_l2_enabled_f(); + gk20a_writel(g, mc_elpg_enable_r(), reg); + + g->max_ltc_count = gk20a_readl(g, top_num_ltcs_r()); + g->ltc_count = gk20a_readl(g, pri_ringmaster_enum_ltc_r()); + gk20a_dbg_info("%u ltcs out of %u", g->ltc_count, g->max_ltc_count); + + gk20a_writel(g, ltc_ltcs_ltss_dstg_cfg0_r(), + gk20a_readl(g, ltc_ltc0_lts0_dstg_cfg0_r()) | + ltc_ltcs_ltss_dstg_cfg0_vdc_4to2_disable_m()); + + /* Disable LTC interrupts */ + reg = gk20a_readl(g, ltc_ltcs_ltss_intr_r()); + reg &= ~ltc_ltcs_ltss_intr_en_evicted_cb_m(); + reg &= ~ltc_ltcs_ltss_intr_en_illegal_compstat_access_m(); + gk20a_writel(g, ltc_ltcs_ltss_intr_r(), reg); + + /* Enable ECC interrupts */ + ltc_intr = gk20a_readl(g, ltc_ltcs_ltss_intr_r()); + ltc_intr |= ltc_ltcs_ltss_intr_en_ecc_sec_error_enabled_f() | + ltc_ltcs_ltss_intr_en_ecc_ded_error_enabled_f(); + gk20a_writel(g, ltc_ltcs_ltss_intr_r(), + ltc_intr); +} + +void gv11b_ltc_isr(struct gk20a *g) +{ + u32 mc_intr, ltc_intr3; + unsigned int ltc, slice; + u32 ltc_stride = nvgpu_get_litter_value(g, GPU_LIT_LTC_STRIDE); + u32 lts_stride = nvgpu_get_litter_value(g, GPU_LIT_LTS_STRIDE); + u32 ecc_status, ecc_addr, corrected_cnt, uncorrected_cnt; + u32 corrected_delta, uncorrected_delta; + u32 corrected_overflow, uncorrected_overflow; + u32 ltc_corrected, ltc_uncorrected; + 
+ mc_intr = gk20a_readl(g, mc_intr_ltc_r()); + for (ltc = 0; ltc < g->ltc_count; ltc++) { + if ((mc_intr & 1 << ltc) == 0) + continue; + ltc_corrected = ltc_uncorrected = 0; + + for (slice = 0; slice < g->gr.slices_per_ltc; slice++) { + u32 offset = ltc_stride * ltc + lts_stride * slice; + ltc_intr3 = gk20a_readl(g, ltc_ltc0_lts0_intr3_r() + + offset); + + /* Detect and handle ECC PARITY errors */ + + if (ltc_intr3 & + (ltc_ltcs_ltss_intr3_ecc_uncorrected_m() | + ltc_ltcs_ltss_intr3_ecc_corrected_m())) { + + ecc_status = gk20a_readl(g, + ltc_ltc0_lts0_l2_cache_ecc_status_r() + + offset); + ecc_addr = gk20a_readl(g, + ltc_ltc0_lts0_l2_cache_ecc_address_r() + + offset); + corrected_cnt = gk20a_readl(g, + ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_r() + offset); + uncorrected_cnt = gk20a_readl(g, + ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_r() + offset); + + corrected_delta = + ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_total_v(corrected_cnt); + uncorrected_delta = + ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_total_v(uncorrected_cnt); + corrected_overflow = ecc_status & + ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_total_counter_overflow_m(); + + uncorrected_overflow = ecc_status & + ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_total_counter_overflow_m(); + + /* clear the interrupt */ + if ((corrected_delta > 0) || corrected_overflow) { + gk20a_writel(g, ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_r() + offset, 0); + } + if ((uncorrected_delta > 0) || uncorrected_overflow) { + gk20a_writel(g, + ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_r() + offset, 0); + } + + gk20a_writel(g, ltc_ltc0_lts0_l2_cache_ecc_status_r() + offset, + ltc_ltc0_lts0_l2_cache_ecc_status_reset_task_f()); + + /* update counters per slice */ + if (corrected_overflow) + corrected_delta += (0x1UL << ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_total_s()); + if (uncorrected_overflow) + uncorrected_delta += (0x1UL << 
ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_total_s()); + + ltc_corrected += corrected_delta; + ltc_uncorrected += uncorrected_delta; + nvgpu_log(g, gpu_dbg_intr, + "ltc:%d lts: %d cache ecc interrupt intr: 0x%x", ltc, slice, ltc_intr3); + + if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_rstg_m()) + nvgpu_log(g, gpu_dbg_intr, "rstg ecc error corrected"); + if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_rstg_m()) + nvgpu_log(g, gpu_dbg_intr, "rstg ecc error uncorrected"); + if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_tstg_m()) + nvgpu_log(g, gpu_dbg_intr, "tstg ecc error corrected"); + if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_tstg_m()) + nvgpu_log(g, gpu_dbg_intr, "tstg ecc error uncorrected"); + if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_dstg_m()) + nvgpu_log(g, gpu_dbg_intr, "dstg ecc error corrected"); + if (ecc_status & ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_dstg_m()) + nvgpu_log(g, gpu_dbg_intr, "dstg ecc error uncorrected"); + + if (corrected_overflow || uncorrected_overflow) + nvgpu_info(g, "ecc counter overflow!"); + + nvgpu_log(g, gpu_dbg_intr, + "ecc error address: 0x%x", ecc_addr); + + } + + } + g->ecc.ltc.t19x.l2_cache_corrected_err_count.counters[ltc] += + ltc_corrected; + g->ecc.ltc.t19x.l2_cache_uncorrected_err_count.counters[ltc] += + ltc_uncorrected; + + } + + /* fallback to other interrupts */ + gp10b_ltc_isr(g); +} + +u32 gv11b_ltc_cbc_fix_config(struct gk20a *g, int base) +{ + u32 val = gk20a_readl(g, ltc_ltcs_ltss_cbc_num_active_ltcs_r()); + + if (ltc_ltcs_ltss_cbc_num_active_ltcs__v(val) == 2) + return base * 2; + else if (ltc_ltcs_ltss_cbc_num_active_ltcs__v(val) != 1) { + nvgpu_err(g, "Invalid number of active ltcs: %08x", val); + } + return base; +} diff --git a/drivers/gpu/nvgpu/gv11b/ltc_gv11b.h b/drivers/gpu/nvgpu/gv11b/ltc_gv11b.h new file mode 100644 index 000000000..9b46e74cd --- /dev/null +++ 
b/drivers/gpu/nvgpu/gv11b/ltc_gv11b.h @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef LTC_GV11B_H +#define LTC_GV11B_H +struct gk20a; + +void gv11b_ltc_set_zbc_stencil_entry(struct gk20a *g, + struct zbc_entry *stencil_val, + u32 index); +void gv11b_ltc_init_fs_state(struct gk20a *g); +void gv11b_ltc_isr(struct gk20a *g); +u32 gv11b_ltc_cbc_fix_config(struct gk20a *g, int base); + +#endif diff --git a/drivers/gpu/nvgpu/gv11b/mc_gv11b.c b/drivers/gpu/nvgpu/gv11b/mc_gv11b.c new file mode 100644 index 000000000..74c5c4d60 --- /dev/null +++ b/drivers/gpu/nvgpu/gv11b/mc_gv11b.c @@ -0,0 +1,92 @@ +/* + * GV11B master + * + * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include + +#include "gk20a/gk20a.h" + +#include "gp10b/mc_gp10b.h" + +#include "mc_gv11b.h" +#include "fb_gv11b.h" + +#include + +void mc_gv11b_intr_enable(struct gk20a *g) +{ + u32 eng_intr_mask = gk20a_fifo_engine_interrupt_mask(g); + + gk20a_writel(g, mc_intr_en_clear_r(NVGPU_MC_INTR_STALLING), + 0xffffffff); + gk20a_writel(g, mc_intr_en_clear_r(NVGPU_MC_INTR_NONSTALLING), + 0xffffffff); + gv11b_fb_disable_hub_intr(g, STALL_REG_INDEX, HUB_INTR_TYPE_ALL); + + g->mc_intr_mask_restore[NVGPU_MC_INTR_STALLING] = + mc_intr_pfifo_pending_f() | + mc_intr_hub_pending_f() | + mc_intr_priv_ring_pending_f() | + mc_intr_pbus_pending_f() | + mc_intr_ltc_pending_f() | + eng_intr_mask; + + g->mc_intr_mask_restore[NVGPU_MC_INTR_NONSTALLING] = + mc_intr_pfifo_pending_f() + | eng_intr_mask; + + /* TODO: Enable PRI faults for HUB ECC err intr */ + gv11b_fb_enable_hub_intr(g, STALL_REG_INDEX, g->mm.hub_intr_types); + + gk20a_writel(g, mc_intr_en_set_r(NVGPU_MC_INTR_STALLING), + g->mc_intr_mask_restore[NVGPU_MC_INTR_STALLING]); + + gk20a_writel(g, mc_intr_en_set_r(NVGPU_MC_INTR_NONSTALLING), + g->mc_intr_mask_restore[NVGPU_MC_INTR_NONSTALLING]); + +} + +bool gv11b_mc_is_intr_hub_pending(struct gk20a *g, u32 mc_intr_0) +{ + return ((mc_intr_0 & mc_intr_hub_pending_f()) ? 
true : false); +} + +bool gv11b_mc_is_stall_and_eng_intr_pending(struct gk20a *g, u32 act_eng_id) +{ + u32 mc_intr_0 = gk20a_readl(g, mc_intr_r(0)); + u32 stall_intr, eng_intr_mask; + + eng_intr_mask = gk20a_fifo_act_eng_interrupt_mask(g, act_eng_id); + if (mc_intr_0 & eng_intr_mask) + return true; + + stall_intr = mc_intr_pfifo_pending_f() | + mc_intr_hub_pending_f() | + mc_intr_priv_ring_pending_f() | + mc_intr_pbus_pending_f() | + mc_intr_ltc_pending_f(); + if (mc_intr_0 & stall_intr) + return true; + + return false; +} diff --git a/drivers/gpu/nvgpu/gv11b/mc_gv11b.h b/drivers/gpu/nvgpu/gv11b/mc_gv11b.h new file mode 100644 index 000000000..eb9d0e4ea --- /dev/null +++ b/drivers/gpu/nvgpu/gv11b/mc_gv11b.h @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef MC_GV11B_H +#define MC_GV11B_H +struct gk20a; + +void mc_gv11b_intr_enable(struct gk20a *g); +bool gv11b_mc_is_intr_hub_pending(struct gk20a *g, u32 mc_intr_0); +bool gv11b_mc_is_stall_and_eng_intr_pending(struct gk20a *g, u32 act_eng_id); +#endif diff --git a/drivers/gpu/nvgpu/gv11b/mm_gv11b.c b/drivers/gpu/nvgpu/gv11b/mm_gv11b.c new file mode 100644 index 000000000..fdc506ac4 --- /dev/null +++ b/drivers/gpu/nvgpu/gv11b/mm_gv11b.c @@ -0,0 +1,330 @@ +/* + * GV11B MMU + * + * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include + +#include +#include +#include +#include + +#include "gk20a/gk20a.h" +#include "gk20a/mm_gk20a.h" + +#include "gp10b/mm_gp10b.h" +#include "gp10b/mc_gp10b.h" + +#include "mm_gv11b.h" +#include "fb_gv11b.h" + +#include +#include +#include + +#define NVGPU_L3_ALLOC_BIT BIT(36) + +bool gv11b_mm_is_bar1_supported(struct gk20a *g) +{ + return false; +} + +void gv11b_init_inst_block(struct nvgpu_mem *inst_block, + struct vm_gk20a *vm, u32 big_page_size) +{ + struct gk20a *g = gk20a_from_vm(vm); + + gk20a_dbg_info("inst block phys = 0x%llx, kv = 0x%p", + nvgpu_inst_block_addr(g, inst_block), inst_block->cpu_va); + + g->ops.mm.init_pdb(g, inst_block, vm); + + if (big_page_size && g->ops.mm.set_big_page_size) + g->ops.mm.set_big_page_size(g, inst_block, big_page_size); +} + +bool gv11b_mm_mmu_fault_pending(struct gk20a *g) +{ + return gv11b_fb_mmu_fault_pending(g); +} + +void gv11b_mm_fault_info_mem_destroy(struct gk20a *g) +{ + nvgpu_log_fn(g, " "); + + nvgpu_mutex_acquire(&g->mm.hub_isr_mutex); + + gv11b_fb_disable_hub_intr(g, STALL_REG_INDEX, HUB_INTR_TYPE_OTHER | + HUB_INTR_TYPE_NONREPLAY | HUB_INTR_TYPE_REPLAY); + + nvgpu_kfree(g, g->mm.fault_info[FAULT_TYPE_OTHER_AND_NONREPLAY]); + + g->mm.fault_info[FAULT_TYPE_OTHER_AND_NONREPLAY] = NULL; + g->mm.fault_info[FAULT_TYPE_REPLAY] = NULL; + + nvgpu_mutex_release(&g->mm.hub_isr_mutex); + nvgpu_mutex_destroy(&g->mm.hub_isr_mutex); +} + +static int gv11b_mm_mmu_fault_info_buf_init(struct gk20a *g, + u32 *hub_intr_types) +{ + struct mmu_fault_info *fault_info_mem; + + fault_info_mem = nvgpu_kzalloc(g, sizeof(struct mmu_fault_info) * + FAULT_TYPE_NUM); + if (!fault_info_mem) { + nvgpu_log_info(g, "failed to alloc shadow fault info"); + return -ENOMEM; + } + /* shadow buffer for copying mmu fault info */ + g->mm.fault_info[FAULT_TYPE_OTHER_AND_NONREPLAY] = + &fault_info_mem[FAULT_TYPE_OTHER_AND_NONREPLAY]; + + g->mm.fault_info[FAULT_TYPE_REPLAY] = + &fault_info_mem[FAULT_TYPE_REPLAY]; + + *hub_intr_types |= 
HUB_INTR_TYPE_OTHER; + return 0; +} + +static void gv11b_mm_mmu_hw_fault_buf_init(struct gk20a *g, + u32 *hub_intr_types) +{ + struct vm_gk20a *vm = g->mm.bar2.vm; + int err = 0; + size_t fb_size; + + /* Max entries take care of 1 entry used for full detection */ + fb_size = (g->ops.fifo.get_num_fifos(g) + 1) * + gmmu_fault_buf_size_v(); + + err = nvgpu_dma_alloc_map_sys(vm, fb_size, + &g->mm.hw_fault_buf[FAULT_TYPE_OTHER_AND_NONREPLAY]); + if (err) { + nvgpu_err(g, + "Error in hw mmu fault buf [0] alloc in bar2 vm "); + /* Fault will be snapped in pri reg but not in buffer */ + return; + } + + g->mm.hw_fault_buf_status[NONREPLAY_REG_INDEX] = + HW_FAULT_BUF_STATUS_ALLOC_TRUE; + *hub_intr_types |= HUB_INTR_TYPE_NONREPLAY; + + err = nvgpu_dma_alloc_map_sys(vm, fb_size, + &g->mm.hw_fault_buf[FAULT_TYPE_REPLAY]); + if (err) { + nvgpu_err(g, + "Error in hw mmu fault buf [1] alloc in bar2 vm "); + /* Fault will be snapped in pri reg but not in buffer */ + return; + } + g->mm.hw_fault_buf_status[REPLAY_REG_INDEX] = + HW_FAULT_BUF_STATUS_ALLOC_TRUE; + *hub_intr_types |= HUB_INTR_TYPE_REPLAY; +} + +static void gv11b_mm_mmu_hw_fault_buf_deinit(struct gk20a *g) +{ + struct vm_gk20a *vm = g->mm.bar2.vm; + + nvgpu_log_fn(g, " "); + + gv11b_fb_disable_hub_intr(g, STALL_REG_INDEX, HUB_INTR_TYPE_NONREPLAY | + HUB_INTR_TYPE_REPLAY); + + g->mm.hub_intr_types &= (~(HUB_INTR_TYPE_NONREPLAY | + HUB_INTR_TYPE_REPLAY)); + + if ((gv11b_fb_is_fault_buf_enabled(g, NONREPLAY_REG_INDEX))) { + gv11b_fb_fault_buf_set_state_hw(g, NONREPLAY_REG_INDEX, + FAULT_BUF_DISABLED); + } + + if ((gv11b_fb_is_fault_buf_enabled(g, REPLAY_REG_INDEX))) { + gv11b_fb_fault_buf_set_state_hw(g, REPLAY_REG_INDEX, + FAULT_BUF_DISABLED); + } + + if (g->mm.hw_fault_buf_status[NONREPLAY_REG_INDEX] == + HW_FAULT_BUF_STATUS_ALLOC_TRUE) { + nvgpu_dma_unmap_free(vm, + &g->mm.hw_fault_buf[FAULT_TYPE_OTHER_AND_NONREPLAY]); + g->mm.hw_fault_buf_status[NONREPLAY_REG_INDEX] = + HW_FAULT_BUF_STATUS_ALLOC_FALSE; + } + + if 
(g->mm.hw_fault_buf_status[REPLAY_REG_INDEX] == + HW_FAULT_BUF_STATUS_ALLOC_TRUE) { + nvgpu_dma_unmap_free(vm, + &g->mm.hw_fault_buf[FAULT_TYPE_REPLAY]); + g->mm.hw_fault_buf_status[REPLAY_REG_INDEX] = + HW_FAULT_BUF_STATUS_ALLOC_FALSE; + } +} + +void gv11b_mm_remove_bar2_vm(struct gk20a *g) +{ + struct mm_gk20a *mm = &g->mm; + + nvgpu_log_fn(g, " "); + + gv11b_mm_mmu_hw_fault_buf_deinit(g); + + nvgpu_free_inst_block(g, &mm->bar2.inst_block); + nvgpu_vm_put(mm->bar2.vm); +} + +static void gv11b_mm_mmu_fault_setup_hw(struct gk20a *g) +{ + if (g->mm.hw_fault_buf_status[NONREPLAY_REG_INDEX] == + HW_FAULT_BUF_STATUS_ALLOC_TRUE) { + gv11b_fb_fault_buf_configure_hw(g, NONREPLAY_REG_INDEX); + } + if (g->mm.hw_fault_buf_status[REPLAY_REG_INDEX] == + HW_FAULT_BUF_STATUS_ALLOC_TRUE) { + gv11b_fb_fault_buf_configure_hw(g, REPLAY_REG_INDEX); + } +} + +static int gv11b_mm_mmu_fault_setup_sw(struct gk20a *g) +{ + int err; + + nvgpu_log_fn(g, " "); + + nvgpu_mutex_init(&g->mm.hub_isr_mutex); + + g->mm.hw_fault_buf_status[NONREPLAY_REG_INDEX] = + HW_FAULT_BUF_STATUS_ALLOC_FALSE; + g->mm.hw_fault_buf_status[REPLAY_REG_INDEX] = + HW_FAULT_BUF_STATUS_ALLOC_FALSE; + + g->mm.hub_intr_types = HUB_INTR_TYPE_ECC_UNCORRECTED; + + err = gv11b_mm_mmu_fault_info_buf_init(g, &g->mm.hub_intr_types); + + if (!err) + gv11b_mm_mmu_hw_fault_buf_init(g, &g->mm.hub_intr_types); + + return err; +} + +int gv11b_init_mm_setup_hw(struct gk20a *g) +{ + int err = 0; + + nvgpu_log_fn(g, " "); + + g->ops.fb.set_mmu_page_size(g); + g->ops.fb.init_hw(g); + + err = g->ops.mm.init_bar2_mm_hw_setup(g); + if (err) + return err; + + if (gk20a_mm_fb_flush(g) || gk20a_mm_fb_flush(g)) + return -EBUSY; + + err = gv11b_mm_mmu_fault_setup_sw(g); + if (!err) + gv11b_mm_mmu_fault_setup_hw(g); + + nvgpu_log_fn(g, "end"); + + return err; +} + +void gv11b_mm_l2_flush(struct gk20a *g, bool invalidate) +{ + nvgpu_log(g, gpu_dbg_fn, "gv11b_mm_l2_flush"); + + g->ops.mm.fb_flush(g); + gk20a_mm_l2_flush(g, invalidate); + 
g->ops.mm.fb_flush(g); +} + +/* + * On Volta the GPU determines whether to do L3 allocation for a mapping by + * checking bit 36 of the phsyical address. So if a mapping should allocte lines + * in the L3 this bit must be set. + */ +u64 gv11b_gpu_phys_addr(struct gk20a *g, + struct nvgpu_gmmu_attrs *attrs, u64 phys) +{ + if (attrs && attrs->t19x_attrs.l3_alloc) + return phys | NVGPU_L3_ALLOC_BIT; + + return phys; +} + +int gv11b_init_bar2_mm_hw_setup(struct gk20a *g) +{ + struct mm_gk20a *mm = &g->mm; + struct nvgpu_mem *inst_block = &mm->bar2.inst_block; + u64 inst_pa = nvgpu_inst_block_addr(g, inst_block); + u32 reg_val; + struct nvgpu_timeout timeout; + u32 delay = GR_IDLE_CHECK_DEFAULT; + + nvgpu_log_fn(g, " "); + + g->ops.fb.set_mmu_page_size(g); + + inst_pa = (u32)(inst_pa >> bus_bar2_block_ptr_shift_v()); + nvgpu_log_info(g, "bar2 inst block ptr: 0x%08x", (u32)inst_pa); + + gk20a_writel(g, bus_bar2_block_r(), + nvgpu_aperture_mask(g, inst_block, + bus_bar2_block_target_sys_mem_ncoh_f(), + bus_bar2_block_target_vid_mem_f()) | + bus_bar2_block_mode_virtual_f() | + bus_bar2_block_ptr_f(inst_pa)); + + /* This is needed as BAR1 support is removed and there is no way + * to know if gpu successfully accessed memory. 
+ * To avoid deadlocks and non-deterministic virtual address translation + * behavior, after writing BAR2_BLOCK to bind BAR2 to a virtual address + * space, SW must ensure that the bind has completed prior to issuing + * any further BAR2 requests by polling for both + * BUS_BIND_STATUS_BAR2_PENDING to return to EMPTY and + * BUS_BIND_STATUS_BAR2_OUTSTANDING to return to FALSE + */ + nvgpu_timeout_init(g, &timeout, gk20a_get_gr_idle_timeout(g), + NVGPU_TIMER_CPU_TIMER); + nvgpu_log_info(g, "check bar2 bind status"); + do { + reg_val = gk20a_readl(g, bus_bind_status_r()); + + if (!((reg_val & bus_bind_status_bar2_pending_busy_f()) || + (reg_val & bus_bind_status_bar2_outstanding_true_f()))) + return 0; + + nvgpu_usleep_range(delay, delay * 2); + delay = min_t(u32, delay << 1, GR_IDLE_CHECK_MAX); + } while (!nvgpu_timeout_expired_msg(&timeout, "bar2 bind timedout")); + + nvgpu_err(g, "bar2 bind failed. gpu unable to access memory"); + return -EBUSY; +} diff --git a/drivers/gpu/nvgpu/gv11b/mm_gv11b.h b/drivers/gpu/nvgpu/gv11b/mm_gv11b.h new file mode 100644 index 000000000..d830b7cc6 --- /dev/null +++ b/drivers/gpu/nvgpu/gv11b/mm_gv11b.h @@ -0,0 +1,46 @@ +/* + * GV11B MM + * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef MM_GV11B_H +#define MM_GV11B_H + +#define HW_FAULT_BUF_STATUS_ALLOC_TRUE 1 +#define HW_FAULT_BUF_STATUS_ALLOC_FALSE 0 + +struct gk20a; +struct nvgpu_mem; +struct vm_gk20a; + +bool gv11b_mm_is_bar1_supported(struct gk20a *g); +void gv11b_init_inst_block(struct nvgpu_mem *inst_block, + struct vm_gk20a *vm, u32 big_page_size); +bool gv11b_mm_mmu_fault_pending(struct gk20a *g); +void gv11b_mm_remove_bar2_vm(struct gk20a *g); +int gv11b_init_mm_setup_hw(struct gk20a *g); +int gv11b_init_bar2_mm_hw_setup(struct gk20a *g); +void gv11b_mm_l2_flush(struct gk20a *g, bool invalidate); +u64 gv11b_gpu_phys_addr(struct gk20a *g, + struct nvgpu_gmmu_attrs *attrs, u64 phys); +void gv11b_mm_fault_info_mem_destroy(struct gk20a *g); + +#endif diff --git a/drivers/gpu/nvgpu/gv11b/platform_gv11b_tegra.c b/drivers/gpu/nvgpu/gv11b/platform_gv11b_tegra.c new file mode 100644 index 000000000..95d82254b --- /dev/null +++ b/drivers/gpu/nvgpu/gv11b/platform_gv11b_tegra.c @@ -0,0 +1,549 @@ +/* + * GV11B Tegra Platform Interface + * + * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include + +#include "gk20a/gk20a.h" +#include "common/linux/platform_gk20a.h" +#include "common/linux/clk.h" + +#include "gp10b/platform_gp10b.h" +#include "common/linux/platform_gp10b_tegra.h" + +#include "common/linux/os_linux.h" +#include "common/linux/platform_gk20a_tegra.h" +#include "gr_gv11b.h" +#include "nvgpu_gpuid_t19x.h" + +static void gr_gv11b_remove_sysfs(struct device *dev); + +static int gv11b_tegra_probe(struct device *dev) +{ + struct gk20a_platform *platform = dev_get_drvdata(dev); +#ifdef CONFIG_TEGRA_GK20A_NVHOST + struct gk20a *g = platform->g; + int err = 0; + + err = nvgpu_get_nvhost_dev(g); + if (err) { + dev_err(dev, "host1x device not available"); + return err; + } + + err = nvgpu_nvhost_syncpt_unit_interface_get_aperture( + g->nvhost_dev, + &g->syncpt_unit_base, + &g->syncpt_unit_size); + if (err) { + dev_err(dev, "Failed to get syncpt interface"); + return -ENOSYS; + } + g->syncpt_size = nvgpu_nvhost_syncpt_unit_interface_get_byte_offset(1); + gk20a_dbg_info("syncpt_unit_base %llx syncpt_unit_size %zx size %x\n", + g->syncpt_unit_base, g->syncpt_unit_size, + g->syncpt_size); +#endif + + platform->bypass_smmu = !device_is_iommuable(dev); + platform->disable_bigpage = platform->bypass_smmu; + + platform->g->gr.t18x.ctx_vars.dump_ctxsw_stats_on_channel_close + = false; + platform->g->gr.t18x.ctx_vars.dump_ctxsw_stats_on_channel_close + = false; + + platform->g->gr.t18x.ctx_vars.force_preemption_gfxp = false; + platform->g->gr.t18x.ctx_vars.force_preemption_cilp = false; + + gp10b_tegra_get_clocks(dev); + nvgpu_linux_init_clk_support(platform->g); + + return 0; +} + +static int gv11b_tegra_remove(struct device *dev) +{ + gp10b_tegra_remove(dev); + + gr_gv11b_remove_sysfs(dev); + + return 0; +} + +static bool gv11b_tegra_is_railgated(struct device *dev) +{ + bool ret = false; +#ifdef TEGRA194_POWER_DOMAIN_GPU + struct 
gk20a *g = get_gk20a(dev); + + if (tegra_bpmp_running()) { + nvgpu_log(g, gpu_dbg_info, "bpmp running"); + ret = !tegra_powergate_is_powered(TEGRA194_POWER_DOMAIN_GPU); + + nvgpu_log(g, gpu_dbg_info, "railgated? %s", ret ? "yes" : "no"); + } else { + nvgpu_log(g, gpu_dbg_info, "bpmp not running"); + } +#endif + return ret; +} + +static int gv11b_tegra_railgate(struct device *dev) +{ +#ifdef TEGRA194_POWER_DOMAIN_GPU + struct gk20a_platform *platform = gk20a_get_platform(dev); + struct gk20a *g = get_gk20a(dev); + int i; + + if (tegra_bpmp_running()) { + nvgpu_log(g, gpu_dbg_info, "bpmp running"); + if (!tegra_powergate_is_powered(TEGRA194_POWER_DOMAIN_GPU)) { + nvgpu_log(g, gpu_dbg_info, "powergate is not powered"); + return 0; + } + nvgpu_log(g, gpu_dbg_info, "clk_disable_unprepare"); + for (i = 0; i < platform->num_clks; i++) { + if (platform->clk[i]) + clk_disable_unprepare(platform->clk[i]); + } + nvgpu_log(g, gpu_dbg_info, "powergate_partition"); + tegra_powergate_partition(TEGRA194_POWER_DOMAIN_GPU); + } else { + nvgpu_log(g, gpu_dbg_info, "bpmp not running"); + } +#endif + return 0; +} + +static int gv11b_tegra_unrailgate(struct device *dev) +{ + int ret = 0; +#ifdef TEGRA194_POWER_DOMAIN_GPU + struct gk20a_platform *platform = gk20a_get_platform(dev); + struct gk20a *g = get_gk20a(dev); + int i; + + if (tegra_bpmp_running()) { + nvgpu_log(g, gpu_dbg_info, "bpmp running"); + ret = tegra_unpowergate_partition(TEGRA194_POWER_DOMAIN_GPU); + if (ret) { + nvgpu_log(g, gpu_dbg_info, + "unpowergate partition failed"); + return ret; + } + nvgpu_log(g, gpu_dbg_info, "clk_prepare_enable"); + for (i = 0; i < platform->num_clks; i++) { + if (platform->clk[i]) + clk_prepare_enable(platform->clk[i]); + } + } else { + nvgpu_log(g, gpu_dbg_info, "bpmp not running"); + } +#endif + return ret; +} + +static int gv11b_tegra_suspend(struct device *dev) +{ + return 0; +} + +struct gk20a_platform t19x_gpu_tegra_platform = { + .has_syncpoints = true, + + /* power management 
configuration */ + + /* ptimer src frequency in hz*/ + .ptimer_src_freq = 31250000, + + .probe = gv11b_tegra_probe, + .remove = gv11b_tegra_remove, + + .enable_slcg = false, + .enable_blcg = false, + .enable_elcg = false, + .can_slcg = false, + .can_blcg = false, + .can_elcg = false, + + /* power management callbacks */ + .suspend = gv11b_tegra_suspend, + .railgate = gv11b_tegra_railgate, + .unrailgate = gv11b_tegra_unrailgate, + .is_railgated = gv11b_tegra_is_railgated, + + .busy = gk20a_tegra_busy, + .idle = gk20a_tegra_idle, + + .dump_platform_dependencies = gk20a_tegra_debug_dump, + + .soc_name = "tegra19x", + + .honors_aperture = true, + .unified_memory = true, + + .reset_assert = gp10b_tegra_reset_assert, + .reset_deassert = gp10b_tegra_reset_deassert, +}; + +static struct device_attribute *dev_attr_sm_l1_tag_ecc_corrected_err_count_array; +static struct device_attribute *dev_attr_sm_l1_tag_ecc_uncorrected_err_count_array; +static struct device_attribute *dev_attr_sm_cbu_ecc_corrected_err_count_array; +static struct device_attribute *dev_attr_sm_cbu_ecc_uncorrected_err_count_array; +static struct device_attribute *dev_attr_sm_l1_data_ecc_corrected_err_count_array; +static struct device_attribute *dev_attr_sm_l1_data_ecc_uncorrected_err_count_array; +static struct device_attribute *dev_attr_sm_icache_ecc_corrected_err_count_array; +static struct device_attribute *dev_attr_sm_icache_ecc_uncorrected_err_count_array; +static struct device_attribute *dev_attr_gcc_l15_ecc_corrected_err_count_array; +static struct device_attribute *dev_attr_gcc_l15_ecc_uncorrected_err_count_array; +static struct device_attribute *dev_attr_mmu_l1tlb_ecc_corrected_err_count_array; +static struct device_attribute *dev_attr_mmu_l1tlb_ecc_uncorrected_err_count_array; + +static struct device_attribute *dev_attr_fecs_ecc_corrected_err_count_array; +static struct device_attribute *dev_attr_fecs_ecc_uncorrected_err_count_array; +static struct device_attribute 
*dev_attr_gpccs_ecc_corrected_err_count_array; +static struct device_attribute *dev_attr_gpccs_ecc_uncorrected_err_count_array; + +static struct device_attribute *dev_attr_l2_cache_ecc_corrected_err_count_array; +static struct device_attribute *dev_attr_l2_cache_ecc_uncorrected_err_count_array; + +static struct device_attribute *dev_attr_mmu_l2tlb_ecc_corrected_err_count_array; +static struct device_attribute *dev_attr_mmu_l2tlb_ecc_uncorrected_err_count_array; +static struct device_attribute *dev_attr_mmu_hubtlb_ecc_corrected_err_count_array; +static struct device_attribute *dev_attr_mmu_hubtlb_ecc_uncorrected_err_count_array; +static struct device_attribute *dev_attr_mmu_fillunit_ecc_corrected_err_count_array; +static struct device_attribute *dev_attr_mmu_fillunit_ecc_uncorrected_err_count_array; + +void gr_gv11b_create_sysfs(struct gk20a *g) +{ + struct device *dev = dev_from_gk20a(g); + int error = 0; + /* This stat creation function is called on GR init. GR can get + initialized multiple times but we only need to create the ECC + stats once. Therefore, add the following check to avoid + creating duplicate stat sysfs nodes. 
*/ + if (g->ecc.gr.t19x.sm_l1_tag_corrected_err_count.counters != NULL) + return; + + gr_gp10b_create_sysfs(g); + + error |= gr_gp10b_ecc_stat_create(dev, + 0, + "sm_l1_tag_ecc_corrected_err_count", + &g->ecc.gr.t19x.sm_l1_tag_corrected_err_count, + &dev_attr_sm_l1_tag_ecc_corrected_err_count_array); + + error |= gr_gp10b_ecc_stat_create(dev, + 0, + "sm_l1_tag_ecc_uncorrected_err_count", + &g->ecc.gr.t19x.sm_l1_tag_uncorrected_err_count, + &dev_attr_sm_l1_tag_ecc_uncorrected_err_count_array); + + error |= gr_gp10b_ecc_stat_create(dev, + 0, + "sm_cbu_ecc_corrected_err_count", + &g->ecc.gr.t19x.sm_cbu_corrected_err_count, + &dev_attr_sm_cbu_ecc_corrected_err_count_array); + + error |= gr_gp10b_ecc_stat_create(dev, + 0, + "sm_cbu_ecc_uncorrected_err_count", + &g->ecc.gr.t19x.sm_cbu_uncorrected_err_count, + &dev_attr_sm_cbu_ecc_uncorrected_err_count_array); + + error |= gr_gp10b_ecc_stat_create(dev, + 0, + "sm_l1_data_ecc_corrected_err_count", + &g->ecc.gr.t19x.sm_l1_data_corrected_err_count, + &dev_attr_sm_l1_data_ecc_corrected_err_count_array); + + error |= gr_gp10b_ecc_stat_create(dev, + 0, + "sm_l1_data_ecc_uncorrected_err_count", + &g->ecc.gr.t19x.sm_l1_data_uncorrected_err_count, + &dev_attr_sm_l1_data_ecc_uncorrected_err_count_array); + + error |= gr_gp10b_ecc_stat_create(dev, + 0, + "sm_icache_ecc_corrected_err_count", + &g->ecc.gr.t19x.sm_icache_corrected_err_count, + &dev_attr_sm_icache_ecc_corrected_err_count_array); + + error |= gr_gp10b_ecc_stat_create(dev, + 0, + "sm_icache_ecc_uncorrected_err_count", + &g->ecc.gr.t19x.sm_icache_uncorrected_err_count, + &dev_attr_sm_icache_ecc_uncorrected_err_count_array); + + error |= gr_gp10b_ecc_stat_create(dev, + 0, + "gcc_l15_ecc_corrected_err_count", + &g->ecc.gr.t19x.gcc_l15_corrected_err_count, + &dev_attr_gcc_l15_ecc_corrected_err_count_array); + + error |= gr_gp10b_ecc_stat_create(dev, + 0, + "gcc_l15_ecc_uncorrected_err_count", + &g->ecc.gr.t19x.gcc_l15_uncorrected_err_count, + 
&dev_attr_gcc_l15_ecc_uncorrected_err_count_array); + + error |= gp10b_ecc_stat_create(dev, + g->ltc_count, + "ltc", + "l2_cache_uncorrected_err_count", + &g->ecc.ltc.t19x.l2_cache_uncorrected_err_count, + &dev_attr_l2_cache_ecc_uncorrected_err_count_array); + + error |= gp10b_ecc_stat_create(dev, + g->ltc_count, + "ltc", + "l2_cache_corrected_err_count", + &g->ecc.ltc.t19x.l2_cache_corrected_err_count, + &dev_attr_l2_cache_ecc_corrected_err_count_array); + + error |= gp10b_ecc_stat_create(dev, + 1, + "gpc", + "fecs_ecc_uncorrected_err_count", + &g->ecc.gr.t19x.fecs_uncorrected_err_count, + &dev_attr_fecs_ecc_uncorrected_err_count_array); + + error |= gp10b_ecc_stat_create(dev, + 1, + "gpc", + "fecs_ecc_corrected_err_count", + &g->ecc.gr.t19x.fecs_corrected_err_count, + &dev_attr_fecs_ecc_corrected_err_count_array); + + error |= gp10b_ecc_stat_create(dev, + g->gr.gpc_count, + "gpc", + "gpccs_ecc_uncorrected_err_count", + &g->ecc.gr.t19x.gpccs_uncorrected_err_count, + &dev_attr_gpccs_ecc_uncorrected_err_count_array); + + error |= gp10b_ecc_stat_create(dev, + g->gr.gpc_count, + "gpc", + "gpccs_ecc_corrected_err_count", + &g->ecc.gr.t19x.gpccs_corrected_err_count, + &dev_attr_gpccs_ecc_corrected_err_count_array); + + error |= gp10b_ecc_stat_create(dev, + g->gr.gpc_count, + "gpc", + "mmu_l1tlb_ecc_uncorrected_err_count", + &g->ecc.gr.t19x.mmu_l1tlb_uncorrected_err_count, + &dev_attr_mmu_l1tlb_ecc_uncorrected_err_count_array); + + error |= gp10b_ecc_stat_create(dev, + g->gr.gpc_count, + "gpc", + "mmu_l1tlb_ecc_corrected_err_count", + &g->ecc.gr.t19x.mmu_l1tlb_corrected_err_count, + &dev_attr_mmu_l1tlb_ecc_corrected_err_count_array); + + error |= gp10b_ecc_stat_create(dev, + 1, + "eng", + "mmu_l2tlb_ecc_uncorrected_err_count", + &g->ecc.eng.t19x.mmu_l2tlb_uncorrected_err_count, + &dev_attr_mmu_l2tlb_ecc_uncorrected_err_count_array); + + error |= gp10b_ecc_stat_create(dev, + 1, + "eng", + "mmu_l2tlb_ecc_corrected_err_count", + 
&g->ecc.eng.t19x.mmu_l2tlb_corrected_err_count, + &dev_attr_mmu_l2tlb_ecc_corrected_err_count_array); + + error |= gp10b_ecc_stat_create(dev, + 1, + "eng", + "mmu_hubtlb_ecc_uncorrected_err_count", + &g->ecc.eng.t19x.mmu_hubtlb_uncorrected_err_count, + &dev_attr_mmu_hubtlb_ecc_uncorrected_err_count_array); + + error |= gp10b_ecc_stat_create(dev, + 1, + "eng", + "mmu_hubtlb_ecc_corrected_err_count", + &g->ecc.eng.t19x.mmu_hubtlb_corrected_err_count, + &dev_attr_mmu_hubtlb_ecc_corrected_err_count_array); + + error |= gp10b_ecc_stat_create(dev, + 1, + "eng", + "mmu_fillunit_ecc_uncorrected_err_count", + &g->ecc.eng.t19x.mmu_fillunit_uncorrected_err_count, + &dev_attr_mmu_fillunit_ecc_uncorrected_err_count_array); + + error |= gp10b_ecc_stat_create(dev, + 1, + "eng", + "mmu_fillunit_ecc_corrected_err_count", + &g->ecc.eng.t19x.mmu_fillunit_corrected_err_count, + &dev_attr_mmu_fillunit_ecc_corrected_err_count_array); + + if (error) + dev_err(dev, "Failed to create gv11b sysfs attributes!\n"); +} + +static void gr_gv11b_remove_sysfs(struct device *dev) +{ + struct gk20a *g = get_gk20a(dev); + + gr_gp10b_ecc_stat_remove(dev, + 0, + &g->ecc.gr.t19x.sm_l1_tag_corrected_err_count, + dev_attr_sm_l1_tag_ecc_corrected_err_count_array); + + gr_gp10b_ecc_stat_remove(dev, + 0, + &g->ecc.gr.t19x.sm_l1_tag_uncorrected_err_count, + dev_attr_sm_l1_tag_ecc_uncorrected_err_count_array); + + gr_gp10b_ecc_stat_remove(dev, + 0, + &g->ecc.gr.t19x.sm_cbu_corrected_err_count, + dev_attr_sm_cbu_ecc_corrected_err_count_array); + + gr_gp10b_ecc_stat_remove(dev, + 0, + &g->ecc.gr.t19x.sm_cbu_uncorrected_err_count, + dev_attr_sm_cbu_ecc_uncorrected_err_count_array); + + gr_gp10b_ecc_stat_remove(dev, + 0, + &g->ecc.gr.t19x.sm_l1_data_corrected_err_count, + dev_attr_sm_l1_data_ecc_corrected_err_count_array); + + gr_gp10b_ecc_stat_remove(dev, + 0, + &g->ecc.gr.t19x.sm_l1_data_uncorrected_err_count, + dev_attr_sm_l1_data_ecc_uncorrected_err_count_array); + + gr_gp10b_ecc_stat_remove(dev, + 0, + 
&g->ecc.gr.t19x.sm_icache_corrected_err_count, + dev_attr_sm_icache_ecc_corrected_err_count_array); + + gr_gp10b_ecc_stat_remove(dev, + 0, + &g->ecc.gr.t19x.sm_icache_uncorrected_err_count, + dev_attr_sm_icache_ecc_uncorrected_err_count_array); + + gr_gp10b_ecc_stat_remove(dev, + 0, + &g->ecc.gr.t19x.gcc_l15_corrected_err_count, + dev_attr_gcc_l15_ecc_corrected_err_count_array); + + gr_gp10b_ecc_stat_remove(dev, + 0, + &g->ecc.gr.t19x.gcc_l15_uncorrected_err_count, + dev_attr_gcc_l15_ecc_uncorrected_err_count_array); + + gp10b_ecc_stat_remove(dev, + g->ltc_count, + &g->ecc.ltc.t19x.l2_cache_uncorrected_err_count, + dev_attr_l2_cache_ecc_uncorrected_err_count_array); + + gp10b_ecc_stat_remove(dev, + g->ltc_count, + &g->ecc.ltc.t19x.l2_cache_corrected_err_count, + dev_attr_l2_cache_ecc_corrected_err_count_array); + + gp10b_ecc_stat_remove(dev, + 1, + &g->ecc.gr.t19x.fecs_uncorrected_err_count, + dev_attr_fecs_ecc_uncorrected_err_count_array); + + gp10b_ecc_stat_remove(dev, + 1, + &g->ecc.gr.t19x.fecs_corrected_err_count, + dev_attr_fecs_ecc_corrected_err_count_array); + + gp10b_ecc_stat_remove(dev, + g->gr.gpc_count, + &g->ecc.gr.t19x.gpccs_uncorrected_err_count, + dev_attr_gpccs_ecc_uncorrected_err_count_array); + + gp10b_ecc_stat_remove(dev, + g->gr.gpc_count, + &g->ecc.gr.t19x.gpccs_corrected_err_count, + dev_attr_gpccs_ecc_corrected_err_count_array); + + gp10b_ecc_stat_remove(dev, + g->gr.gpc_count, + &g->ecc.gr.t19x.mmu_l1tlb_uncorrected_err_count, + dev_attr_mmu_l1tlb_ecc_uncorrected_err_count_array); + + gp10b_ecc_stat_remove(dev, + g->gr.gpc_count, + &g->ecc.gr.t19x.mmu_l1tlb_corrected_err_count, + dev_attr_mmu_l1tlb_ecc_corrected_err_count_array); + + gp10b_ecc_stat_remove(dev, + 1, + &g->ecc.eng.t19x.mmu_l2tlb_uncorrected_err_count, + dev_attr_mmu_l2tlb_ecc_uncorrected_err_count_array); + + gp10b_ecc_stat_remove(dev, + 1, + &g->ecc.eng.t19x.mmu_l2tlb_corrected_err_count, + dev_attr_mmu_l2tlb_ecc_corrected_err_count_array); + + gp10b_ecc_stat_remove(dev, + 
1, + &g->ecc.eng.t19x.mmu_hubtlb_uncorrected_err_count, + dev_attr_mmu_hubtlb_ecc_uncorrected_err_count_array); + + gp10b_ecc_stat_remove(dev, + 1, + &g->ecc.eng.t19x.mmu_hubtlb_corrected_err_count, + dev_attr_mmu_hubtlb_ecc_corrected_err_count_array); + + gp10b_ecc_stat_remove(dev, + 1, + &g->ecc.eng.t19x.mmu_fillunit_uncorrected_err_count, + dev_attr_mmu_fillunit_ecc_uncorrected_err_count_array); + + gp10b_ecc_stat_remove(dev, + 1, + &g->ecc.eng.t19x.mmu_fillunit_corrected_err_count, + dev_attr_mmu_fillunit_ecc_corrected_err_count_array); +} diff --git a/drivers/gpu/nvgpu/gv11b/pmu_gv11b.c b/drivers/gpu/nvgpu/gv11b/pmu_gv11b.c new file mode 100644 index 000000000..2c7b64575 --- /dev/null +++ b/drivers/gpu/nvgpu/gv11b/pmu_gv11b.c @@ -0,0 +1,283 @@ +/* + * GV11B PMU + * + * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include /* for udelay */ +#include + +#include + +#include +#include +#include +#include + +#include "gk20a/gk20a.h" + +#include "gp10b/pmu_gp10b.h" +#include "gp106/pmu_gp106.h" + +#include "pmu_gv11b.h" +#include "acr_gv11b.h" + +#include + +#define gv11b_dbg_pmu(fmt, arg...) \ + gk20a_dbg(gpu_dbg_pmu, fmt, ##arg) + +#define ALIGN_4KB 12 + +bool gv11b_is_pmu_supported(struct gk20a *g) +{ + return true; +} + +bool gv11b_is_lazy_bootstrap(u32 falcon_id) +{ + bool enable_status = false; + + switch (falcon_id) { + case LSF_FALCON_ID_FECS: + enable_status = true; + break; + case LSF_FALCON_ID_GPCCS: + enable_status = true; + break; + default: + break; + } + + return enable_status; +} + +bool gv11b_is_priv_load(u32 falcon_id) +{ + bool enable_status = false; + + switch (falcon_id) { + case LSF_FALCON_ID_FECS: + enable_status = true; + break; + case LSF_FALCON_ID_GPCCS: + enable_status = true; + break; + default: + break; + } + + return enable_status; +} + +int gv11b_pmu_bootstrap(struct nvgpu_pmu *pmu) +{ + struct gk20a *g = gk20a_from_pmu(pmu); + struct mm_gk20a *mm = &g->mm; + struct pmu_ucode_desc *desc = pmu->desc; + u64 addr_code_lo, addr_data_lo, addr_load_lo; + u64 addr_code_hi, addr_data_hi, addr_load_hi; + u32 i, blocks, addr_args; + + gk20a_dbg_fn(""); + + gk20a_writel(g, pwr_falcon_itfen_r(), + gk20a_readl(g, pwr_falcon_itfen_r()) | + pwr_falcon_itfen_ctxen_enable_f()); + + gk20a_writel(g, pwr_pmu_new_instblk_r(), + pwr_pmu_new_instblk_ptr_f( + nvgpu_inst_block_addr(g, &mm->pmu.inst_block) >> ALIGN_4KB) + | pwr_pmu_new_instblk_valid_f(1) + | pwr_pmu_new_instblk_target_sys_ncoh_f()); + + /* TBD: load all other surfaces */ + g->ops.pmu_ver.set_pmu_cmdline_args_trace_size( + pmu, GK20A_PMU_TRACE_BUFSIZE); + g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_base(pmu); + g->ops.pmu_ver.set_pmu_cmdline_args_trace_dma_idx( + pmu, GK20A_PMU_DMAIDX_VIRT); + + g->ops.pmu_ver.set_pmu_cmdline_args_cpu_freq(pmu, + g->ops.clk.get_rate(g, CTRL_CLK_DOMAIN_PWRCLK)); + + 
addr_args = (pwr_falcon_hwcfg_dmem_size_v( + gk20a_readl(g, pwr_falcon_hwcfg_r())) + << GK20A_PMU_DMEM_BLKSIZE2) - + g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu); + + nvgpu_flcn_copy_to_dmem(pmu->flcn, addr_args, + (u8 *)(g->ops.pmu_ver.get_pmu_cmdline_args_ptr(pmu)), + g->ops.pmu_ver.get_pmu_cmdline_args_size(pmu), 0); + + gk20a_writel(g, pwr_falcon_dmemc_r(0), + pwr_falcon_dmemc_offs_f(0) | + pwr_falcon_dmemc_blk_f(0) | + pwr_falcon_dmemc_aincw_f(1)); + + addr_code_lo = u64_lo32((pmu->ucode.gpu_va + + desc->app_start_offset + + desc->app_resident_code_offset) >> 8); + + addr_code_hi = u64_hi32((pmu->ucode.gpu_va + + desc->app_start_offset + + desc->app_resident_code_offset) >> 8); + addr_data_lo = u64_lo32((pmu->ucode.gpu_va + + desc->app_start_offset + + desc->app_resident_data_offset) >> 8); + addr_data_hi = u64_hi32((pmu->ucode.gpu_va + + desc->app_start_offset + + desc->app_resident_data_offset) >> 8); + addr_load_lo = u64_lo32((pmu->ucode.gpu_va + + desc->bootloader_start_offset) >> 8); + addr_load_hi = u64_hi32((pmu->ucode.gpu_va + + desc->bootloader_start_offset) >> 8); + + gk20a_writel(g, pwr_falcon_dmemd_r(0), 0x0); + gk20a_writel(g, pwr_falcon_dmemd_r(0), 0x0); + gk20a_writel(g, pwr_falcon_dmemd_r(0), 0x0); + gk20a_writel(g, pwr_falcon_dmemd_r(0), 0x0); + gk20a_writel(g, pwr_falcon_dmemd_r(0), 0x0); + gk20a_writel(g, pwr_falcon_dmemd_r(0), 0x0); + gk20a_writel(g, pwr_falcon_dmemd_r(0), 0x0); + gk20a_writel(g, pwr_falcon_dmemd_r(0), 0x0); + gk20a_writel(g, pwr_falcon_dmemd_r(0), GK20A_PMU_DMAIDX_UCODE); + gk20a_writel(g, pwr_falcon_dmemd_r(0), addr_code_lo << 8); + gk20a_writel(g, pwr_falcon_dmemd_r(0), addr_code_hi); + gk20a_writel(g, pwr_falcon_dmemd_r(0), desc->app_resident_code_offset); + gk20a_writel(g, pwr_falcon_dmemd_r(0), desc->app_resident_code_size); + gk20a_writel(g, pwr_falcon_dmemd_r(0), 0x0); + gk20a_writel(g, pwr_falcon_dmemd_r(0), 0x0); + gk20a_writel(g, pwr_falcon_dmemd_r(0), desc->app_imem_entry); + gk20a_writel(g, 
pwr_falcon_dmemd_r(0), addr_data_lo << 8); + gk20a_writel(g, pwr_falcon_dmemd_r(0), addr_data_hi); + gk20a_writel(g, pwr_falcon_dmemd_r(0), desc->app_resident_data_size); + gk20a_writel(g, pwr_falcon_dmemd_r(0), 0x1); + gk20a_writel(g, pwr_falcon_dmemd_r(0), addr_args); + + g->ops.pmu.write_dmatrfbase(g, + addr_load_lo - (desc->bootloader_imem_offset >> 8)); + + blocks = ((desc->bootloader_size + 0xFF) & ~0xFF) >> 8; + + for (i = 0; i < blocks; i++) { + gk20a_writel(g, pwr_falcon_dmatrfmoffs_r(), + desc->bootloader_imem_offset + (i << 8)); + gk20a_writel(g, pwr_falcon_dmatrffboffs_r(), + desc->bootloader_imem_offset + (i << 8)); + gk20a_writel(g, pwr_falcon_dmatrfcmd_r(), + pwr_falcon_dmatrfcmd_imem_f(1) | + pwr_falcon_dmatrfcmd_write_f(0) | + pwr_falcon_dmatrfcmd_size_f(6) | + pwr_falcon_dmatrfcmd_ctxdma_f(GK20A_PMU_DMAIDX_UCODE)); + } + + nvgpu_flcn_bootstrap(pmu->flcn, desc->bootloader_entry_point); + + gk20a_writel(g, pwr_falcon_os_r(), desc->app_version); + + return 0; +} + +static void pmu_handle_pg_sub_feature_msg(struct gk20a *g, struct pmu_msg *msg, + void *param, u32 handle, u32 status) +{ + gk20a_dbg_fn(""); + + if (status != 0) { + nvgpu_err(g, "Sub-feature mask update cmd aborted\n"); + return; + } + + gv11b_dbg_pmu("sub-feature mask update is acknowledged from PMU %x\n", + msg->msg.pg.msg_type); +} + +static void pmu_handle_pg_param_msg(struct gk20a *g, struct pmu_msg *msg, + void *param, u32 handle, u32 status) +{ + gk20a_dbg_fn(""); + + if (status != 0) { + nvgpu_err(g, "GR PARAM cmd aborted\n"); + return; + } + + gv11b_dbg_pmu("GR PARAM is acknowledged from PMU %x\n", + msg->msg.pg.msg_type); +} + +int gv11b_pg_gr_init(struct gk20a *g, u32 pg_engine_id) +{ + struct nvgpu_pmu *pmu = &g->pmu; + struct pmu_cmd cmd; + u32 seq; + + if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) { + memset(&cmd, 0, sizeof(struct pmu_cmd)); + cmd.hdr.unit_id = PMU_UNIT_PG; + cmd.hdr.size = PMU_CMD_HDR_SIZE + + sizeof(struct pmu_pg_cmd_gr_init_param_v1); + 
cmd.cmd.pg.gr_init_param_v1.cmd_type = + PMU_PG_CMD_ID_PG_PARAM; + cmd.cmd.pg.gr_init_param_v1.sub_cmd_id = + PMU_PG_PARAM_CMD_GR_INIT_PARAM; + cmd.cmd.pg.gr_init_param_v1.featuremask = + PMU_PG_FEATURE_GR_POWER_GATING_ENABLED; + + gv11b_dbg_pmu("cmd post PMU_PG_CMD_ID_PG_PARAM_INIT\n"); + nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, + pmu_handle_pg_param_msg, pmu, &seq, ~0); + + } else + return -EINVAL; + + return 0; +} + +int gv11b_pg_set_subfeature_mask(struct gk20a *g, u32 pg_engine_id) +{ + struct nvgpu_pmu *pmu = &g->pmu; + struct pmu_cmd cmd; + u32 seq; + + if (pg_engine_id == PMU_PG_ELPG_ENGINE_ID_GRAPHICS) { + memset(&cmd, 0, sizeof(struct pmu_cmd)); + cmd.hdr.unit_id = PMU_UNIT_PG; + cmd.hdr.size = PMU_CMD_HDR_SIZE + + sizeof(struct pmu_pg_cmd_sub_feature_mask_update); + cmd.cmd.pg.sf_mask_update.cmd_type = + PMU_PG_CMD_ID_PG_PARAM; + cmd.cmd.pg.sf_mask_update.sub_cmd_id = + PMU_PG_PARAM_CMD_SUB_FEATURE_MASK_UPDATE; + cmd.cmd.pg.sf_mask_update.ctrl_id = + PMU_PG_ELPG_ENGINE_ID_GRAPHICS; + cmd.cmd.pg.sf_mask_update.enabled_mask = + PMU_PG_FEATURE_GR_POWER_GATING_ENABLED; + + gv11b_dbg_pmu("cmd post PMU_PG_CMD_SUB_FEATURE_MASK_UPDATE\n"); + nvgpu_pmu_cmd_post(g, &cmd, NULL, NULL, PMU_COMMAND_QUEUE_HPQ, + pmu_handle_pg_sub_feature_msg, pmu, &seq, ~0); + } else + return -EINVAL; + + return 0; +} diff --git a/drivers/gpu/nvgpu/gv11b/pmu_gv11b.h b/drivers/gpu/nvgpu/gv11b/pmu_gv11b.h new file mode 100644 index 000000000..809970ffe --- /dev/null +++ b/drivers/gpu/nvgpu/gv11b/pmu_gv11b.h @@ -0,0 +1,37 @@ +/* + * GV11B PMU + * + * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __PMU_GV11B_H_ +#define __PMU_GV11B_H_ + +struct gk20a; + +bool gv11b_is_pmu_supported(struct gk20a *g); +int gv11b_pmu_bootstrap(struct nvgpu_pmu *pmu); +int gv11b_pg_gr_init(struct gk20a *g, u32 pg_engine_id); +int gv11b_pg_set_subfeature_mask(struct gk20a *g, u32 pg_engine_id); +bool gv11b_is_lazy_bootstrap(u32 falcon_id); +bool gv11b_is_priv_load(u32 falcon_id); + +#endif /*__PMU_GV11B_H_*/ diff --git a/drivers/gpu/nvgpu/gv11b/regops_gv11b.c b/drivers/gpu/nvgpu/gv11b/regops_gv11b.c new file mode 100644 index 000000000..c356785e3 --- /dev/null +++ b/drivers/gpu/nvgpu/gv11b/regops_gv11b.c @@ -0,0 +1,1548 @@ +/* + * Tegra GV11b GPU Driver Register Ops + * + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include +#include +#include +#include + +#include "gk20a/gk20a.h" +#include "gk20a/dbg_gpu_gk20a.h" +#include "gk20a/regops_gk20a.h" +#include "regops_gv11b.h" + +static const struct regop_offset_range gv11b_global_whitelist_ranges[] = { + { 0x000004f0, 1}, + { 0x00001a00, 1}, + { 0x00009400, 1}, + { 0x00009410, 1}, + { 0x00009480, 1}, + { 0x00020200, 32}, + { 0x00021c04, 2}, + { 0x00021c14, 3}, + { 0x00021c24, 71}, + { 0x00021d44, 1}, + { 0x00021d4c, 1}, + { 0x00021d54, 1}, + { 0x00021d5c, 1}, + { 0x00021d68, 19}, + { 0x00021dbc, 16}, + { 0x00022430, 7}, + { 0x00022450, 1}, + { 0x0002245c, 2}, + { 0x00070000, 5}, + { 0x000840a8, 1}, + { 0x00084b5c, 1}, + { 0x000870a8, 1}, + { 0x000884e0, 1}, + { 0x00100c18, 3}, + { 0x00100c84, 1}, + { 0x0010a0a8, 1}, + { 0x0010a4f0, 1}, + { 0x0013c808, 2}, + { 0x0013cc14, 1}, + { 0x0013ec18, 1}, + { 0x00140028, 1}, + { 0x00140280, 1}, + { 0x001402a0, 1}, + { 0x00140350, 1}, + { 0x00140480, 1}, + { 0x001404a0, 1}, + { 0x00140550, 1}, + { 0x00140680, 1}, + { 0x001406a0, 1}, + { 0x00140750, 1}, + { 0x00142028, 1}, + { 0x00142280, 1}, + { 0x001422a0, 1}, + { 0x00142350, 1}, + { 0x00142480, 1}, + { 0x001424a0, 1}, + { 0x00142550, 1}, + { 0x00142680, 1}, + { 0x001426a0, 1}, + { 0x00142750, 1}, + { 0x0017e028, 1}, + { 0x0017e280, 1}, + { 0x0017e294, 1}, + { 0x0017e29c, 2}, + { 0x0017e2ac, 1}, + { 0x0017e350, 1}, + { 0x0017e39c, 1}, + { 0x0017e480, 1}, + { 0x0017e4a0, 1}, + { 0x0017e550, 1}, + { 0x0017e680, 1}, + { 0x0017e6a0, 1}, + { 0x0017e750, 1}, + { 0x00180040, 41}, + { 0x001800ec, 1}, + { 0x001800f8, 7}, + { 0x00180120, 2}, + { 0x00180240, 41}, + { 0x001802ec, 1}, + { 0x001802f8, 7}, + { 0x00180320, 2}, + { 0x00180440, 41}, + { 0x001804ec, 1}, + { 0x001804f8, 7}, + { 0x00180520, 2}, + { 0x00180640, 41}, + { 0x001806ec, 1}, + { 0x001806f8, 7}, + { 0x00180720, 2}, + { 0x00180840, 41}, + { 0x001808ec, 1}, + { 0x001808f8, 7}, + { 0x00180920, 2}, + { 0x00180a40, 41}, + { 0x00180aec, 1}, + { 0x00180af8, 7}, + { 0x00180b20, 2}, + 
{ 0x00180c40, 41}, + { 0x00180cec, 1}, + { 0x00180cf8, 2}, + { 0x00180d04, 4}, + { 0x00180d20, 2}, + { 0x00180e40, 41}, + { 0x00180eec, 1}, + { 0x00180ef8, 2}, + { 0x00180f04, 4}, + { 0x00180f20, 2}, + { 0x00181040, 41}, + { 0x001810ec, 1}, + { 0x001810f8, 2}, + { 0x00181104, 4}, + { 0x00181120, 2}, + { 0x00181240, 41}, + { 0x001812ec, 1}, + { 0x001812f8, 2}, + { 0x00181304, 4}, + { 0x00181320, 2}, + { 0x00181440, 41}, + { 0x001814ec, 1}, + { 0x001814f8, 2}, + { 0x00181504, 4}, + { 0x00181520, 2}, + { 0x00181640, 41}, + { 0x001816ec, 1}, + { 0x001816f8, 2}, + { 0x00181704, 4}, + { 0x00181720, 2}, + { 0x00181840, 41}, + { 0x001818ec, 1}, + { 0x001818f8, 2}, + { 0x00181904, 4}, + { 0x00181920, 2}, + { 0x00181a40, 41}, + { 0x00181aec, 1}, + { 0x00181af8, 2}, + { 0x00181b04, 4}, + { 0x00181b20, 2}, + { 0x00181c40, 41}, + { 0x00181cec, 1}, + { 0x00181cf8, 2}, + { 0x00181d04, 4}, + { 0x00181d20, 2}, + { 0x00181e40, 41}, + { 0x00181eec, 1}, + { 0x00181ef8, 2}, + { 0x00181f04, 4}, + { 0x00181f20, 2}, + { 0x00182040, 41}, + { 0x001820ec, 1}, + { 0x001820f8, 2}, + { 0x00182104, 4}, + { 0x00182120, 2}, + { 0x00182240, 41}, + { 0x001822ec, 1}, + { 0x001822f8, 2}, + { 0x00182304, 4}, + { 0x00182320, 2}, + { 0x00182440, 41}, + { 0x001824ec, 1}, + { 0x001824f8, 2}, + { 0x00182504, 4}, + { 0x00182520, 2}, + { 0x00182640, 41}, + { 0x001826ec, 1}, + { 0x001826f8, 2}, + { 0x00182704, 4}, + { 0x00182720, 2}, + { 0x00182840, 41}, + { 0x001828ec, 1}, + { 0x001828f8, 2}, + { 0x00182904, 4}, + { 0x00182920, 2}, + { 0x00182a40, 41}, + { 0x00182aec, 1}, + { 0x00182af8, 2}, + { 0x00182b04, 4}, + { 0x00182b20, 2}, + { 0x00182c40, 41}, + { 0x00182cec, 1}, + { 0x00182cf8, 2}, + { 0x00182d04, 4}, + { 0x00182d20, 2}, + { 0x00182e40, 41}, + { 0x00182eec, 1}, + { 0x00182ef8, 2}, + { 0x00182f04, 4}, + { 0x00182f20, 2}, + { 0x00183040, 41}, + { 0x001830ec, 1}, + { 0x001830f8, 2}, + { 0x00183104, 4}, + { 0x00183120, 2}, + { 0x00183240, 41}, + { 0x001832ec, 1}, + { 0x001832f8, 2}, + { 0x00183304, 4}, + 
{ 0x00183320, 2}, + { 0x00183440, 41}, + { 0x001834ec, 1}, + { 0x001834f8, 2}, + { 0x00183504, 4}, + { 0x00183520, 2}, + { 0x00183640, 41}, + { 0x001836ec, 1}, + { 0x001836f8, 2}, + { 0x00183704, 4}, + { 0x00183720, 2}, + { 0x00183840, 41}, + { 0x001838ec, 1}, + { 0x001838f8, 2}, + { 0x00183904, 4}, + { 0x00183920, 2}, + { 0x00183a40, 41}, + { 0x00183aec, 1}, + { 0x00183af8, 2}, + { 0x00183b04, 4}, + { 0x00183b20, 2}, + { 0x00183c40, 41}, + { 0x00183cec, 1}, + { 0x00183cf8, 2}, + { 0x00183d04, 4}, + { 0x00183d20, 2}, + { 0x00183e40, 41}, + { 0x00183eec, 1}, + { 0x00183ef8, 2}, + { 0x00183f04, 4}, + { 0x00183f20, 2}, + { 0x001c80a8, 1}, + { 0x001c9100, 1}, + { 0x001cc0a8, 1}, + { 0x001cd100, 1}, + { 0x001d00a8, 1}, + { 0x001d1100, 1}, + { 0x00200040, 41}, + { 0x002000ec, 1}, + { 0x002000f8, 7}, + { 0x00200120, 2}, + { 0x00200240, 41}, + { 0x002002ec, 1}, + { 0x002002f8, 7}, + { 0x00200320, 2}, + { 0x00200440, 41}, + { 0x002004ec, 1}, + { 0x002004f8, 7}, + { 0x00200520, 2}, + { 0x00200640, 41}, + { 0x002006ec, 1}, + { 0x002006f8, 7}, + { 0x00200720, 2}, + { 0x00200840, 41}, + { 0x002008ec, 1}, + { 0x002008f8, 2}, + { 0x00200904, 4}, + { 0x00200920, 2}, + { 0x00200a40, 41}, + { 0x00200aec, 1}, + { 0x00200af8, 2}, + { 0x00200b04, 4}, + { 0x00200b20, 2}, + { 0x00200c40, 41}, + { 0x00200cec, 1}, + { 0x00200cf8, 2}, + { 0x00200d04, 4}, + { 0x00200d20, 2}, + { 0x00200e40, 41}, + { 0x00200eec, 1}, + { 0x00200ef8, 2}, + { 0x00200f04, 4}, + { 0x00200f20, 2}, + { 0x00201040, 41}, + { 0x002010ec, 1}, + { 0x002010f8, 2}, + { 0x00201104, 4}, + { 0x00201120, 2}, + { 0x00201240, 41}, + { 0x002012ec, 1}, + { 0x002012f8, 2}, + { 0x00201304, 4}, + { 0x00201320, 2}, + { 0x00201440, 41}, + { 0x002014ec, 1}, + { 0x002014f8, 2}, + { 0x00201504, 4}, + { 0x00201520, 2}, + { 0x00201640, 41}, + { 0x002016ec, 1}, + { 0x002016f8, 2}, + { 0x00201704, 4}, + { 0x00201720, 2}, + { 0x00201840, 41}, + { 0x002018ec, 1}, + { 0x002018f8, 2}, + { 0x00201904, 4}, + { 0x00201920, 2}, + { 0x00201a40, 41}, + 
{ 0x00201aec, 1}, + { 0x00201af8, 2}, + { 0x00201b04, 4}, + { 0x00201b20, 2}, + { 0x00201c40, 41}, + { 0x00201cec, 1}, + { 0x00201cf8, 2}, + { 0x00201d04, 4}, + { 0x00201d20, 2}, + { 0x00201e40, 41}, + { 0x00201eec, 1}, + { 0x00201ef8, 2}, + { 0x00201f04, 4}, + { 0x00201f20, 2}, + { 0x00202040, 41}, + { 0x002020ec, 1}, + { 0x002020f8, 2}, + { 0x00202104, 4}, + { 0x00202120, 2}, + { 0x00202240, 41}, + { 0x002022ec, 1}, + { 0x002022f8, 2}, + { 0x00202304, 4}, + { 0x00202320, 2}, + { 0x00202440, 41}, + { 0x002024ec, 1}, + { 0x002024f8, 2}, + { 0x00202504, 4}, + { 0x00202520, 2}, + { 0x00202640, 41}, + { 0x002026ec, 1}, + { 0x002026f8, 2}, + { 0x00202704, 4}, + { 0x00202720, 2}, + { 0x00202840, 41}, + { 0x002028ec, 1}, + { 0x002028f8, 2}, + { 0x00202904, 4}, + { 0x00202920, 2}, + { 0x00202a40, 41}, + { 0x00202aec, 1}, + { 0x00202af8, 2}, + { 0x00202b04, 4}, + { 0x00202b20, 2}, + { 0x00202c40, 41}, + { 0x00202cec, 1}, + { 0x00202cf8, 2}, + { 0x00202d04, 4}, + { 0x00202d20, 2}, + { 0x00202e40, 41}, + { 0x00202eec, 1}, + { 0x00202ef8, 2}, + { 0x00202f04, 4}, + { 0x00202f20, 2}, + { 0x00203040, 41}, + { 0x002030ec, 1}, + { 0x002030f8, 2}, + { 0x00203104, 4}, + { 0x00203120, 2}, + { 0x00203240, 41}, + { 0x002032ec, 1}, + { 0x002032f8, 2}, + { 0x00203304, 4}, + { 0x00203320, 2}, + { 0x00203440, 41}, + { 0x002034ec, 1}, + { 0x002034f8, 2}, + { 0x00203504, 4}, + { 0x00203520, 2}, + { 0x00203640, 41}, + { 0x002036ec, 1}, + { 0x002036f8, 2}, + { 0x00203704, 4}, + { 0x00203720, 2}, + { 0x00203840, 41}, + { 0x002038ec, 1}, + { 0x002038f8, 2}, + { 0x00203904, 4}, + { 0x00203920, 2}, + { 0x00203a40, 41}, + { 0x00203aec, 1}, + { 0x00203af8, 2}, + { 0x00203b04, 4}, + { 0x00203b20, 2}, + { 0x00203c40, 41}, + { 0x00203cec, 1}, + { 0x00203cf8, 2}, + { 0x00203d04, 4}, + { 0x00203d20, 2}, + { 0x00203e40, 41}, + { 0x00203eec, 1}, + { 0x00203ef8, 2}, + { 0x00203f04, 4}, + { 0x00203f20, 2}, + { 0x00240040, 41}, + { 0x002400ec, 1}, + { 0x002400f8, 7}, + { 0x00240120, 2}, + { 0x00240240, 41}, + 
{ 0x002402ec, 1}, + { 0x002402f8, 7}, + { 0x00240320, 2}, + { 0x00240440, 41}, + { 0x002404ec, 1}, + { 0x002404f8, 7}, + { 0x00240520, 2}, + { 0x00240640, 41}, + { 0x002406ec, 1}, + { 0x002406f8, 7}, + { 0x00240720, 2}, + { 0x00240840, 41}, + { 0x002408ec, 1}, + { 0x002408f8, 7}, + { 0x00240920, 2}, + { 0x00240a40, 41}, + { 0x00240aec, 1}, + { 0x00240af8, 7}, + { 0x00240b20, 2}, + { 0x00240c40, 41}, + { 0x00240cec, 1}, + { 0x00240cf8, 2}, + { 0x00240d04, 4}, + { 0x00240d20, 2}, + { 0x00240e40, 41}, + { 0x00240eec, 1}, + { 0x00240ef8, 2}, + { 0x00240f04, 4}, + { 0x00240f20, 2}, + { 0x00241040, 41}, + { 0x002410ec, 1}, + { 0x002410f8, 2}, + { 0x00241104, 4}, + { 0x00241120, 2}, + { 0x00241240, 41}, + { 0x002412ec, 1}, + { 0x002412f8, 2}, + { 0x00241304, 4}, + { 0x00241320, 2}, + { 0x00241440, 41}, + { 0x002414ec, 1}, + { 0x002414f8, 2}, + { 0x00241504, 4}, + { 0x00241520, 2}, + { 0x00241640, 41}, + { 0x002416ec, 1}, + { 0x002416f8, 2}, + { 0x00241704, 4}, + { 0x00241720, 2}, + { 0x00241840, 41}, + { 0x002418ec, 1}, + { 0x002418f8, 2}, + { 0x00241904, 4}, + { 0x00241920, 2}, + { 0x00241a40, 41}, + { 0x00241aec, 1}, + { 0x00241af8, 2}, + { 0x00241b04, 4}, + { 0x00241b20, 2}, + { 0x00241c40, 41}, + { 0x00241cec, 1}, + { 0x00241cf8, 2}, + { 0x00241d04, 4}, + { 0x00241d20, 2}, + { 0x00241e40, 41}, + { 0x00241eec, 1}, + { 0x00241ef8, 2}, + { 0x00241f04, 4}, + { 0x00241f20, 2}, + { 0x00242040, 41}, + { 0x002420ec, 1}, + { 0x002420f8, 2}, + { 0x00242104, 4}, + { 0x00242120, 2}, + { 0x00242240, 41}, + { 0x002422ec, 1}, + { 0x002422f8, 2}, + { 0x00242304, 4}, + { 0x00242320, 2}, + { 0x00242440, 41}, + { 0x002424ec, 1}, + { 0x002424f8, 2}, + { 0x00242504, 4}, + { 0x00242520, 2}, + { 0x00242640, 41}, + { 0x002426ec, 1}, + { 0x002426f8, 2}, + { 0x00242704, 4}, + { 0x00242720, 2}, + { 0x00242840, 41}, + { 0x002428ec, 1}, + { 0x002428f8, 2}, + { 0x00242904, 4}, + { 0x00242920, 2}, + { 0x00242a40, 41}, + { 0x00242aec, 1}, + { 0x00242af8, 2}, + { 0x00242b04, 4}, + { 0x00242b20, 2}, + 
{ 0x00242c40, 41}, + { 0x00242cec, 1}, + { 0x00242cf8, 2}, + { 0x00242d04, 4}, + { 0x00242d20, 2}, + { 0x00242e40, 41}, + { 0x00242eec, 1}, + { 0x00242ef8, 2}, + { 0x00242f04, 4}, + { 0x00242f20, 2}, + { 0x00243040, 41}, + { 0x002430ec, 1}, + { 0x002430f8, 2}, + { 0x00243104, 4}, + { 0x00243120, 2}, + { 0x00243240, 41}, + { 0x002432ec, 1}, + { 0x002432f8, 2}, + { 0x00243304, 4}, + { 0x00243320, 2}, + { 0x00243440, 41}, + { 0x002434ec, 1}, + { 0x002434f8, 2}, + { 0x00243504, 4}, + { 0x00243520, 2}, + { 0x00243640, 41}, + { 0x002436ec, 1}, + { 0x002436f8, 2}, + { 0x00243704, 4}, + { 0x00243720, 2}, + { 0x00243840, 41}, + { 0x002438ec, 1}, + { 0x002438f8, 2}, + { 0x00243904, 4}, + { 0x00243920, 2}, + { 0x00243a40, 41}, + { 0x00243aec, 1}, + { 0x00243af8, 2}, + { 0x00243b04, 4}, + { 0x00243b20, 2}, + { 0x00243c40, 41}, + { 0x00243cec, 1}, + { 0x00243cf8, 2}, + { 0x00243d04, 4}, + { 0x00243d20, 2}, + { 0x00243e40, 41}, + { 0x00243eec, 1}, + { 0x00243ef8, 2}, + { 0x00243f04, 4}, + { 0x00243f20, 2}, + { 0x00244000, 1}, + { 0x00244008, 1}, + { 0x00244010, 2}, + { 0x00246000, 1}, + { 0x00246008, 1}, + { 0x00246010, 2}, + { 0x00248000, 1}, + { 0x00248008, 1}, + { 0x00248010, 2}, + { 0x0024a000, 1}, + { 0x0024a008, 1}, + { 0x0024a010, 11}, + { 0x0024a040, 3}, + { 0x0024a050, 3}, + { 0x0024a060, 4}, + { 0x0024a074, 7}, + { 0x0024a094, 3}, + { 0x0024a0a4, 1}, + { 0x0024a100, 6}, + { 0x00250040, 25}, + { 0x002500c8, 7}, + { 0x002500ec, 1}, + { 0x002500f8, 2}, + { 0x00250104, 4}, + { 0x00250120, 2}, + { 0x00250240, 25}, + { 0x002502c8, 7}, + { 0x002502ec, 1}, + { 0x002502f8, 2}, + { 0x00250304, 4}, + { 0x00250320, 2}, + { 0x00250840, 25}, + { 0x002508c8, 7}, + { 0x002508ec, 1}, + { 0x002508f8, 2}, + { 0x00250904, 4}, + { 0x00250920, 2}, + { 0x00250a40, 25}, + { 0x00250ac8, 7}, + { 0x00250aec, 1}, + { 0x00250af8, 2}, + { 0x00250b04, 4}, + { 0x00250b20, 2}, + { 0x00251800, 3}, + { 0x00251810, 2}, + { 0x00251a00, 3}, + { 0x00251a10, 2}, + { 0x00278040, 25}, + { 0x002780c8, 7}, + { 
0x002780ec, 1}, + { 0x002780f8, 2}, + { 0x00278104, 4}, + { 0x00278120, 2}, + { 0x00278240, 25}, + { 0x002782c8, 7}, + { 0x002782ec, 1}, + { 0x002782f8, 2}, + { 0x00278304, 4}, + { 0x00278320, 2}, + { 0x00278440, 25}, + { 0x002784c8, 7}, + { 0x002784ec, 1}, + { 0x002784f8, 2}, + { 0x00278504, 4}, + { 0x00278520, 2}, + { 0x00278640, 25}, + { 0x002786c8, 7}, + { 0x002786ec, 1}, + { 0x002786f8, 2}, + { 0x00278704, 4}, + { 0x00278720, 2}, + { 0x00278840, 25}, + { 0x002788c8, 7}, + { 0x002788ec, 1}, + { 0x002788f8, 2}, + { 0x00278904, 4}, + { 0x00278920, 2}, + { 0x00278a40, 25}, + { 0x00278ac8, 7}, + { 0x00278aec, 1}, + { 0x00278af8, 2}, + { 0x00278b04, 4}, + { 0x00278b20, 2}, + { 0x00278c40, 25}, + { 0x00278cc8, 7}, + { 0x00278cec, 1}, + { 0x00278cf8, 2}, + { 0x00278d04, 4}, + { 0x00278d20, 2}, + { 0x00278e40, 25}, + { 0x00278ec8, 7}, + { 0x00278eec, 1}, + { 0x00278ef8, 2}, + { 0x00278f04, 4}, + { 0x00278f20, 2}, + { 0x00279040, 25}, + { 0x002790c8, 7}, + { 0x002790ec, 1}, + { 0x002790f8, 2}, + { 0x00279104, 4}, + { 0x00279120, 2}, + { 0x00279240, 25}, + { 0x002792c8, 7}, + { 0x002792ec, 1}, + { 0x002792f8, 2}, + { 0x00279304, 4}, + { 0x00279320, 2}, + { 0x00279440, 25}, + { 0x002794c8, 7}, + { 0x002794ec, 1}, + { 0x002794f8, 2}, + { 0x00279504, 4}, + { 0x00279520, 2}, + { 0x00279640, 25}, + { 0x002796c8, 7}, + { 0x002796ec, 1}, + { 0x002796f8, 2}, + { 0x00279704, 4}, + { 0x00279720, 2}, + { 0x00279840, 25}, + { 0x002798c8, 7}, + { 0x002798ec, 1}, + { 0x002798f8, 2}, + { 0x00279904, 4}, + { 0x00279920, 2}, + { 0x00279a40, 25}, + { 0x00279ac8, 7}, + { 0x00279aec, 1}, + { 0x00279af8, 2}, + { 0x00279b04, 4}, + { 0x00279b20, 2}, + { 0x00279c40, 25}, + { 0x00279cc8, 7}, + { 0x00279cec, 1}, + { 0x00279cf8, 2}, + { 0x00279d04, 4}, + { 0x00279d20, 2}, + { 0x00279e40, 25}, + { 0x00279ec8, 7}, + { 0x00279eec, 1}, + { 0x00279ef8, 2}, + { 0x00279f04, 4}, + { 0x00279f20, 2}, + { 0x0027a040, 25}, + { 0x0027a0c8, 7}, + { 0x0027a0ec, 1}, + { 0x0027a0f8, 2}, + { 0x0027a104, 4}, + { 
0x0027a120, 2}, + { 0x0027a240, 25}, + { 0x0027a2c8, 7}, + { 0x0027a2ec, 1}, + { 0x0027a2f8, 2}, + { 0x0027a304, 4}, + { 0x0027a320, 2}, + { 0x0027a440, 25}, + { 0x0027a4c8, 7}, + { 0x0027a4ec, 1}, + { 0x0027a4f8, 2}, + { 0x0027a504, 4}, + { 0x0027a520, 2}, + { 0x0027a640, 25}, + { 0x0027a6c8, 7}, + { 0x0027a6ec, 1}, + { 0x0027a6f8, 2}, + { 0x0027a704, 4}, + { 0x0027a720, 2}, + { 0x0027a840, 25}, + { 0x0027a8c8, 7}, + { 0x0027a8ec, 1}, + { 0x0027a8f8, 2}, + { 0x0027a904, 4}, + { 0x0027a920, 2}, + { 0x0027aa40, 25}, + { 0x0027aac8, 7}, + { 0x0027aaec, 1}, + { 0x0027aaf8, 2}, + { 0x0027ab04, 4}, + { 0x0027ab20, 2}, + { 0x0027ac40, 25}, + { 0x0027acc8, 7}, + { 0x0027acec, 1}, + { 0x0027acf8, 2}, + { 0x0027ad04, 4}, + { 0x0027ad20, 2}, + { 0x0027ae40, 25}, + { 0x0027aec8, 7}, + { 0x0027aeec, 1}, + { 0x0027aef8, 2}, + { 0x0027af04, 4}, + { 0x0027af20, 2}, + { 0x0027b040, 25}, + { 0x0027b0c8, 7}, + { 0x0027b0ec, 1}, + { 0x0027b0f8, 2}, + { 0x0027b104, 4}, + { 0x0027b120, 2}, + { 0x0027b240, 25}, + { 0x0027b2c8, 7}, + { 0x0027b2ec, 1}, + { 0x0027b2f8, 2}, + { 0x0027b304, 4}, + { 0x0027b320, 2}, + { 0x0027b440, 25}, + { 0x0027b4c8, 7}, + { 0x0027b4ec, 1}, + { 0x0027b4f8, 2}, + { 0x0027b504, 4}, + { 0x0027b520, 2}, + { 0x0027b640, 25}, + { 0x0027b6c8, 7}, + { 0x0027b6ec, 1}, + { 0x0027b6f8, 2}, + { 0x0027b704, 4}, + { 0x0027b720, 2}, + { 0x0027b840, 25}, + { 0x0027b8c8, 7}, + { 0x0027b8ec, 1}, + { 0x0027b8f8, 2}, + { 0x0027b904, 4}, + { 0x0027b920, 2}, + { 0x0027ba40, 25}, + { 0x0027bac8, 7}, + { 0x0027baec, 1}, + { 0x0027baf8, 2}, + { 0x0027bb04, 4}, + { 0x0027bb20, 2}, + { 0x0027bc40, 25}, + { 0x0027bcc8, 7}, + { 0x0027bcec, 1}, + { 0x0027bcf8, 2}, + { 0x0027bd04, 4}, + { 0x0027bd20, 2}, + { 0x0027be40, 25}, + { 0x0027bec8, 7}, + { 0x0027beec, 1}, + { 0x0027bef8, 2}, + { 0x0027bf04, 4}, + { 0x0027bf20, 2}, + { 0x0027c040, 25}, + { 0x0027c0c8, 7}, + { 0x0027c0ec, 1}, + { 0x0027c0f8, 2}, + { 0x0027c104, 4}, + { 0x0027c120, 2}, + { 0x0027c240, 25}, + { 0x0027c2c8, 7}, + { 
0x0027c2ec, 1}, + { 0x0027c2f8, 2}, + { 0x0027c304, 4}, + { 0x0027c320, 2}, + { 0x0027c440, 25}, + { 0x0027c4c8, 7}, + { 0x0027c4ec, 1}, + { 0x0027c4f8, 2}, + { 0x0027c504, 4}, + { 0x0027c520, 2}, + { 0x0027c640, 25}, + { 0x0027c6c8, 7}, + { 0x0027c6ec, 1}, + { 0x0027c6f8, 2}, + { 0x0027c704, 4}, + { 0x0027c720, 2}, + { 0x0027c840, 25}, + { 0x0027c8c8, 7}, + { 0x0027c8ec, 1}, + { 0x0027c8f8, 2}, + { 0x0027c904, 4}, + { 0x0027c920, 2}, + { 0x0027ca40, 25}, + { 0x0027cac8, 7}, + { 0x0027caec, 1}, + { 0x0027caf8, 2}, + { 0x0027cb04, 4}, + { 0x0027cb20, 2}, + { 0x0027cc40, 25}, + { 0x0027ccc8, 7}, + { 0x0027ccec, 1}, + { 0x0027ccf8, 2}, + { 0x0027cd04, 4}, + { 0x0027cd20, 2}, + { 0x0027ce40, 25}, + { 0x0027cec8, 7}, + { 0x0027ceec, 1}, + { 0x0027cef8, 2}, + { 0x0027cf04, 4}, + { 0x0027cf20, 2}, + { 0x0027d040, 25}, + { 0x0027d0c8, 7}, + { 0x0027d0ec, 1}, + { 0x0027d0f8, 2}, + { 0x0027d104, 4}, + { 0x0027d120, 2}, + { 0x0027d240, 25}, + { 0x0027d2c8, 7}, + { 0x0027d2ec, 1}, + { 0x0027d2f8, 2}, + { 0x0027d304, 4}, + { 0x0027d320, 2}, + { 0x0027d440, 25}, + { 0x0027d4c8, 7}, + { 0x0027d4ec, 1}, + { 0x0027d4f8, 2}, + { 0x0027d504, 4}, + { 0x0027d520, 2}, + { 0x0027d640, 25}, + { 0x0027d6c8, 7}, + { 0x0027d6ec, 1}, + { 0x0027d6f8, 2}, + { 0x0027d704, 4}, + { 0x0027d720, 2}, + { 0x0027d840, 25}, + { 0x0027d8c8, 7}, + { 0x0027d8ec, 1}, + { 0x0027d8f8, 2}, + { 0x0027d904, 4}, + { 0x0027d920, 2}, + { 0x0027da40, 25}, + { 0x0027dac8, 7}, + { 0x0027daec, 1}, + { 0x0027daf8, 2}, + { 0x0027db04, 4}, + { 0x0027db20, 2}, + { 0x0027dc40, 25}, + { 0x0027dcc8, 7}, + { 0x0027dcec, 1}, + { 0x0027dcf8, 2}, + { 0x0027dd04, 4}, + { 0x0027dd20, 2}, + { 0x0027de40, 25}, + { 0x0027dec8, 7}, + { 0x0027deec, 1}, + { 0x0027def8, 2}, + { 0x0027df04, 4}, + { 0x0027df20, 2}, + { 0x0027e040, 25}, + { 0x0027e0c8, 7}, + { 0x0027e0ec, 1}, + { 0x0027e0f8, 2}, + { 0x0027e104, 4}, + { 0x0027e120, 2}, + { 0x0027e240, 25}, + { 0x0027e2c8, 7}, + { 0x0027e2ec, 1}, + { 0x0027e2f8, 2}, + { 0x0027e304, 4}, + { 
0x0027e320, 2}, + { 0x0027e440, 25}, + { 0x0027e4c8, 7}, + { 0x0027e4ec, 1}, + { 0x0027e4f8, 2}, + { 0x0027e504, 4}, + { 0x0027e520, 2}, + { 0x0027e640, 25}, + { 0x0027e6c8, 7}, + { 0x0027e6ec, 1}, + { 0x0027e6f8, 2}, + { 0x0027e704, 4}, + { 0x0027e720, 2}, + { 0x0027e840, 25}, + { 0x0027e8c8, 7}, + { 0x0027e8ec, 1}, + { 0x0027e8f8, 2}, + { 0x0027e904, 4}, + { 0x0027e920, 2}, + { 0x0027ea40, 25}, + { 0x0027eac8, 7}, + { 0x0027eaec, 1}, + { 0x0027eaf8, 2}, + { 0x0027eb04, 4}, + { 0x0027eb20, 2}, + { 0x0027ec40, 25}, + { 0x0027ecc8, 7}, + { 0x0027ecec, 1}, + { 0x0027ecf8, 2}, + { 0x0027ed04, 4}, + { 0x0027ed20, 2}, + { 0x0027ee40, 25}, + { 0x0027eec8, 7}, + { 0x0027eeec, 1}, + { 0x0027eef8, 2}, + { 0x0027ef04, 4}, + { 0x0027ef20, 2}, + { 0x0027f040, 25}, + { 0x0027f0c8, 7}, + { 0x0027f0ec, 1}, + { 0x0027f0f8, 2}, + { 0x0027f104, 4}, + { 0x0027f120, 2}, + { 0x0027f240, 25}, + { 0x0027f2c8, 7}, + { 0x0027f2ec, 1}, + { 0x0027f2f8, 2}, + { 0x0027f304, 4}, + { 0x0027f320, 2}, + { 0x0027f440, 25}, + { 0x0027f4c8, 7}, + { 0x0027f4ec, 1}, + { 0x0027f4f8, 2}, + { 0x0027f504, 4}, + { 0x0027f520, 2}, + { 0x0027f640, 25}, + { 0x0027f6c8, 7}, + { 0x0027f6ec, 1}, + { 0x0027f6f8, 2}, + { 0x0027f704, 4}, + { 0x0027f720, 2}, + { 0x0027f840, 25}, + { 0x0027f8c8, 7}, + { 0x0027f8ec, 1}, + { 0x0027f8f8, 2}, + { 0x0027f904, 4}, + { 0x0027f920, 2}, + { 0x0027fa40, 25}, + { 0x0027fac8, 7}, + { 0x0027faec, 1}, + { 0x0027faf8, 2}, + { 0x0027fb04, 4}, + { 0x0027fb20, 2}, + { 0x0027fc40, 25}, + { 0x0027fcc8, 7}, + { 0x0027fcec, 1}, + { 0x0027fcf8, 2}, + { 0x0027fd04, 4}, + { 0x0027fd20, 2}, + { 0x0027fe40, 25}, + { 0x0027fec8, 7}, + { 0x0027feec, 1}, + { 0x0027fef8, 2}, + { 0x0027ff04, 4}, + { 0x0027ff20, 2}, + { 0x00400500, 1}, + { 0x0040415c, 1}, + { 0x00404468, 1}, + { 0x00404498, 1}, + { 0x00405800, 1}, + { 0x00405840, 2}, + { 0x00405850, 1}, + { 0x00405908, 1}, + { 0x00405a00, 1}, + { 0x00405b50, 1}, + { 0x00406024, 5}, + { 0x00407010, 1}, + { 0x00407808, 1}, + { 0x0040803c, 1}, + { 
0x00408804, 1}, + { 0x0040880c, 1}, + { 0x00408900, 2}, + { 0x00408910, 1}, + { 0x00408944, 1}, + { 0x00408984, 1}, + { 0x004090a8, 1}, + { 0x004098a0, 1}, + { 0x00409b00, 1}, + { 0x0041000c, 1}, + { 0x00410110, 1}, + { 0x00410184, 1}, + { 0x0041040c, 1}, + { 0x00410510, 1}, + { 0x00410584, 1}, + { 0x00418000, 1}, + { 0x00418008, 1}, + { 0x00418380, 2}, + { 0x00418400, 2}, + { 0x004184a0, 1}, + { 0x00418604, 1}, + { 0x00418680, 1}, + { 0x00418704, 1}, + { 0x00418714, 1}, + { 0x00418800, 1}, + { 0x0041881c, 1}, + { 0x00418830, 1}, + { 0x00418884, 1}, + { 0x004188b0, 1}, + { 0x004188c8, 3}, + { 0x004188fc, 1}, + { 0x00418b04, 1}, + { 0x00418c04, 1}, + { 0x00418c10, 8}, + { 0x00418c88, 1}, + { 0x00418d00, 1}, + { 0x00418e00, 1}, + { 0x00418e08, 1}, + { 0x00418e34, 1}, + { 0x00418e40, 4}, + { 0x00418e58, 16}, + { 0x00418f08, 1}, + { 0x00419000, 1}, + { 0x0041900c, 1}, + { 0x00419018, 1}, + { 0x00419854, 1}, + { 0x00419864, 1}, + { 0x00419a04, 2}, + { 0x00419ab0, 1}, + { 0x00419b04, 1}, + { 0x00419b3c, 1}, + { 0x00419b48, 1}, + { 0x00419b50, 1}, + { 0x00419ba0, 2}, + { 0x00419bb0, 1}, + { 0x00419bdc, 1}, + { 0x00419c0c, 1}, + { 0x00419d00, 1}, + { 0x00419d08, 2}, + { 0x00419e08, 1}, + { 0x00419e80, 8}, + { 0x00419ea8, 5}, + { 0x00419f00, 8}, + { 0x00419f28, 5}, + { 0x00419f80, 8}, + { 0x00419fa8, 5}, + { 0x0041a02c, 2}, + { 0x0041a0a8, 1}, + { 0x0041a8a0, 3}, + { 0x0041b014, 1}, + { 0x0041b0cc, 1}, + { 0x0041b1dc, 1}, + { 0x0041b214, 1}, + { 0x0041b2cc, 1}, + { 0x0041b3dc, 1}, + { 0x0041be0c, 3}, + { 0x0041becc, 1}, + { 0x0041bfdc, 1}, + { 0x0041c054, 1}, + { 0x0041c2b0, 1}, + { 0x0041c304, 1}, + { 0x0041c33c, 1}, + { 0x0041c348, 1}, + { 0x0041c350, 1}, + { 0x0041c3a0, 2}, + { 0x0041c3b0, 1}, + { 0x0041c3dc, 1}, + { 0x0041c40c, 1}, + { 0x0041c500, 1}, + { 0x0041c508, 2}, + { 0x0041c608, 1}, + { 0x0041c680, 8}, + { 0x0041c6a8, 5}, + { 0x0041c700, 8}, + { 0x0041c728, 5}, + { 0x0041c780, 8}, + { 0x0041c7a8, 5}, + { 0x0041c854, 1}, + { 0x0041cab0, 1}, + { 0x0041cb04, 1}, + 
{ 0x0041cb3c, 1}, + { 0x0041cb48, 1}, + { 0x0041cb50, 1}, + { 0x0041cba0, 2}, + { 0x0041cbb0, 1}, + { 0x0041cbdc, 1}, + { 0x0041cc0c, 1}, + { 0x0041cd00, 1}, + { 0x0041cd08, 2}, + { 0x0041ce08, 1}, + { 0x0041ce80, 8}, + { 0x0041cea8, 5}, + { 0x0041cf00, 8}, + { 0x0041cf28, 5}, + { 0x0041cf80, 8}, + { 0x0041cfa8, 5}, + { 0x0041d054, 1}, + { 0x0041d2b0, 1}, + { 0x0041d304, 1}, + { 0x0041d33c, 1}, + { 0x0041d348, 1}, + { 0x0041d350, 1}, + { 0x0041d3a0, 2}, + { 0x0041d3b0, 1}, + { 0x0041d3dc, 1}, + { 0x0041d40c, 1}, + { 0x0041d500, 1}, + { 0x0041d508, 2}, + { 0x0041d608, 1}, + { 0x0041d680, 8}, + { 0x0041d6a8, 5}, + { 0x0041d700, 8}, + { 0x0041d728, 5}, + { 0x0041d780, 8}, + { 0x0041d7a8, 5}, + { 0x0041d854, 1}, + { 0x0041dab0, 1}, + { 0x0041db04, 1}, + { 0x0041db3c, 1}, + { 0x0041db48, 1}, + { 0x0041db50, 1}, + { 0x0041dba0, 2}, + { 0x0041dbb0, 1}, + { 0x0041dbdc, 1}, + { 0x0041dc0c, 1}, + { 0x0041dd00, 1}, + { 0x0041dd08, 2}, + { 0x0041de08, 1}, + { 0x0041de80, 8}, + { 0x0041dea8, 5}, + { 0x0041df00, 8}, + { 0x0041df28, 5}, + { 0x0041df80, 8}, + { 0x0041dfa8, 5}, + { 0x00481a00, 19}, + { 0x00481b00, 50}, + { 0x00481e00, 50}, + { 0x00481f00, 50}, + { 0x00484200, 19}, + { 0x00484300, 50}, + { 0x00484600, 50}, + { 0x00484700, 50}, + { 0x00484a00, 19}, + { 0x00484b00, 50}, + { 0x00484e00, 50}, + { 0x00484f00, 50}, + { 0x00485200, 19}, + { 0x00485300, 50}, + { 0x00485600, 50}, + { 0x00485700, 50}, + { 0x00485a00, 19}, + { 0x00485b00, 50}, + { 0x00485e00, 50}, + { 0x00485f00, 50}, + { 0x00500384, 1}, + { 0x005004a0, 1}, + { 0x00500604, 1}, + { 0x00500680, 1}, + { 0x00500714, 1}, + { 0x0050081c, 1}, + { 0x00500884, 1}, + { 0x005008b0, 1}, + { 0x005008c8, 3}, + { 0x005008fc, 1}, + { 0x00500b04, 1}, + { 0x00500c04, 1}, + { 0x00500c10, 8}, + { 0x00500c88, 1}, + { 0x00500d00, 1}, + { 0x00500e08, 1}, + { 0x00500f08, 1}, + { 0x00501000, 1}, + { 0x0050100c, 1}, + { 0x00501018, 1}, + { 0x00501854, 1}, + { 0x00501ab0, 1}, + { 0x00501b04, 1}, + { 0x00501b3c, 1}, + { 0x00501b48, 1}, + 
{ 0x00501b50, 1}, + { 0x00501ba0, 2}, + { 0x00501bb0, 1}, + { 0x00501bdc, 1}, + { 0x00501c0c, 1}, + { 0x00501d00, 1}, + { 0x00501d08, 2}, + { 0x00501e08, 1}, + { 0x00501e80, 8}, + { 0x00501ea8, 5}, + { 0x00501f00, 8}, + { 0x00501f28, 5}, + { 0x00501f80, 8}, + { 0x00501fa8, 5}, + { 0x0050202c, 2}, + { 0x005020a8, 1}, + { 0x005028a0, 3}, + { 0x00503014, 1}, + { 0x005030cc, 1}, + { 0x005031dc, 1}, + { 0x00503214, 1}, + { 0x005032cc, 1}, + { 0x005033dc, 1}, + { 0x00503e14, 1}, + { 0x00503ecc, 1}, + { 0x00503fdc, 1}, + { 0x00504054, 1}, + { 0x005042b0, 1}, + { 0x00504304, 1}, + { 0x0050433c, 1}, + { 0x00504348, 1}, + { 0x00504350, 1}, + { 0x005043a0, 2}, + { 0x005043b0, 1}, + { 0x005043dc, 1}, + { 0x0050440c, 1}, + { 0x00504500, 1}, + { 0x00504508, 2}, + { 0x00504608, 1}, + { 0x00504680, 8}, + { 0x005046a8, 5}, + { 0x00504700, 8}, + { 0x00504728, 5}, + { 0x00504780, 8}, + { 0x005047a8, 5}, + { 0x00504854, 1}, + { 0x00504ab0, 1}, + { 0x00504b04, 1}, + { 0x00504b3c, 1}, + { 0x00504b48, 1}, + { 0x00504b50, 1}, + { 0x00504ba0, 2}, + { 0x00504bb0, 1}, + { 0x00504bdc, 1}, + { 0x00504c0c, 1}, + { 0x00504d00, 1}, + { 0x00504d08, 2}, + { 0x00504e08, 1}, + { 0x00504e80, 8}, + { 0x00504ea8, 5}, + { 0x00504f00, 8}, + { 0x00504f28, 5}, + { 0x00504f80, 8}, + { 0x00504fa8, 5}, + { 0x00505054, 1}, + { 0x005052b0, 1}, + { 0x00505304, 1}, + { 0x0050533c, 1}, + { 0x00505348, 1}, + { 0x00505350, 1}, + { 0x005053a0, 2}, + { 0x005053b0, 1}, + { 0x005053dc, 1}, + { 0x0050540c, 1}, + { 0x00505500, 1}, + { 0x00505508, 2}, + { 0x00505608, 1}, + { 0x00505680, 8}, + { 0x005056a8, 5}, + { 0x00505700, 8}, + { 0x00505728, 5}, + { 0x00505780, 8}, + { 0x005057a8, 5}, + { 0x00505854, 1}, + { 0x00505ab0, 1}, + { 0x00505b04, 1}, + { 0x00505b3c, 1}, + { 0x00505b48, 1}, + { 0x00505b50, 1}, + { 0x00505ba0, 2}, + { 0x00505bb0, 1}, + { 0x00505bdc, 1}, + { 0x00505c0c, 1}, + { 0x00505d00, 1}, + { 0x00505d08, 2}, + { 0x00505e08, 1}, + { 0x00505e80, 8}, + { 0x00505ea8, 5}, + { 0x00505f00, 8}, + { 0x00505f28, 5}, + 
{ 0x00505f80, 8}, + { 0x00505fa8, 5}, + { 0x00581a00, 19}, + { 0x00581b00, 50}, + { 0x00581e00, 50}, + { 0x00581f00, 50}, + { 0x00584200, 19}, + { 0x00584300, 50}, + { 0x00584600, 50}, + { 0x00584700, 50}, + { 0x00584a00, 19}, + { 0x00584b00, 50}, + { 0x00584e00, 50}, + { 0x00584f00, 50}, + { 0x00585200, 19}, + { 0x00585300, 50}, + { 0x00585600, 50}, + { 0x00585700, 50}, + { 0x00585a00, 19}, + { 0x00585b00, 50}, + { 0x00585e00, 50}, + { 0x00585f00, 50}, + { 0x00900100, 1}, + { 0x009a0100, 1}, + { 0x00a00160, 2}, + { 0x00a007d0, 1}, + { 0x00a04200, 1}, + { 0x00a04470, 2}, + { 0x00a08190, 1}, + { 0x00a08198, 4}, + { 0x00a0c820, 2}, + { 0x00a0cc20, 2}, + { 0x00a0e470, 2}, + { 0x00a0e490, 9}, + { 0x00a0e6a8, 7}, + { 0x00a0e6c8, 2}, + { 0x00a0e6d4, 7}, + { 0x00a0e6f4, 2}, + { 0x00a0ec70, 2}, + { 0x00a0ec90, 9}, + { 0x00a0eea8, 7}, + { 0x00a0eec8, 2}, + { 0x00a0eed4, 7}, + { 0x00a0eef4, 2}, + { 0x00a10190, 1}, + { 0x00a10198, 4}, + { 0x00a14820, 2}, + { 0x00a14c20, 2}, + { 0x00a16470, 2}, + { 0x00a16490, 9}, + { 0x00a166a8, 7}, + { 0x00a166c8, 2}, + { 0x00a166d4, 7}, + { 0x00a166f4, 2}, + { 0x00a16c70, 2}, + { 0x00a16c90, 9}, + { 0x00a16ea8, 7}, + { 0x00a16ec8, 2}, + { 0x00a16ed4, 7}, + { 0x00a16ef4, 2}, + { 0x00a18190, 1}, + { 0x00a18198, 4}, + { 0x00a1c820, 2}, + { 0x00a1cc20, 2}, + { 0x00a1e470, 2}, + { 0x00a1e490, 9}, + { 0x00a1e6a8, 7}, + { 0x00a1e6c8, 2}, + { 0x00a1e6d4, 7}, + { 0x00a1e6f4, 2}, + { 0x00a1ec70, 2}, + { 0x00a1ec90, 9}, + { 0x00a1eea8, 7}, + { 0x00a1eec8, 2}, + { 0x00a1eed4, 7}, + { 0x00a1eef4, 2}, + { 0x00a20190, 1}, + { 0x00a20198, 4}, + { 0x00a24820, 2}, + { 0x00a24c20, 2}, + { 0x00a26470, 2}, + { 0x00a26490, 9}, + { 0x00a266a8, 7}, + { 0x00a266c8, 2}, + { 0x00a266d4, 7}, + { 0x00a266f4, 2}, + { 0x00a26c70, 2}, + { 0x00a26c90, 9}, + { 0x00a26ea8, 7}, + { 0x00a26ec8, 2}, + { 0x00a26ed4, 7}, + { 0x00a26ef4, 2}, + { 0x00a28190, 1}, + { 0x00a28198, 4}, + { 0x00a2c820, 2}, + { 0x00a2cc20, 2}, + { 0x00a2e470, 2}, + { 0x00a2e490, 9}, + { 0x00a2e6a8, 7}, + 
{ 0x00a2e6c8, 2}, + { 0x00a2e6d4, 7}, + { 0x00a2e6f4, 2}, + { 0x00a2ec70, 2}, + { 0x00a2ec90, 9}, + { 0x00a2eea8, 7}, + { 0x00a2eec8, 2}, + { 0x00a2eed4, 7}, + { 0x00a2eef4, 2}, + { 0x00a30190, 1}, + { 0x00a30198, 4}, + { 0x00a34820, 2}, + { 0x00a34c20, 2}, + { 0x00a36470, 2}, + { 0x00a36490, 9}, + { 0x00a366a8, 7}, + { 0x00a366c8, 2}, + { 0x00a366d4, 7}, + { 0x00a366f4, 2}, + { 0x00a36c70, 2}, + { 0x00a36c90, 9}, + { 0x00a36ea8, 7}, + { 0x00a36ec8, 2}, + { 0x00a36ed4, 7}, + { 0x00a36ef4, 2}, + { 0x00a38190, 1}, + { 0x00a38198, 4}, + { 0x00a3c820, 2}, + { 0x00a3cc20, 2}, + { 0x00a3e470, 2}, + { 0x00a3e490, 9}, + { 0x00a3e6a8, 7}, + { 0x00a3e6c8, 2}, + { 0x00a3e6d4, 7}, + { 0x00a3e6f4, 2}, + { 0x00a3ec70, 2}, + { 0x00a3ec90, 9}, + { 0x00a3eea8, 7}, + { 0x00a3eec8, 2}, + { 0x00a3eed4, 7}, + { 0x00a3eef4, 2}, +}; + + +static const u32 gv11b_global_whitelist_ranges_count = + ARRAY_SIZE(gv11b_global_whitelist_ranges); + +/* context */ + +/* runcontrol */ +static const u32 gv11b_runcontrol_whitelist[] = { +}; +static const u32 gv11b_runcontrol_whitelist_count = + ARRAY_SIZE(gv11b_runcontrol_whitelist); + +static const struct regop_offset_range gv11b_runcontrol_whitelist_ranges[] = { +}; +static const u32 gv11b_runcontrol_whitelist_ranges_count = + ARRAY_SIZE(gv11b_runcontrol_whitelist_ranges); + + +/* quad ctl */ +static const u32 gv11b_qctl_whitelist[] = { +}; +static const u32 gv11b_qctl_whitelist_count = + ARRAY_SIZE(gv11b_qctl_whitelist); + +static const struct regop_offset_range gv11b_qctl_whitelist_ranges[] = { +}; +static const u32 gv11b_qctl_whitelist_ranges_count = + ARRAY_SIZE(gv11b_qctl_whitelist_ranges); + +const struct regop_offset_range *gv11b_get_global_whitelist_ranges(void) +{ + return gv11b_global_whitelist_ranges; +} + +int gv11b_get_global_whitelist_ranges_count(void) +{ + return gv11b_global_whitelist_ranges_count; +} + +const struct regop_offset_range *gv11b_get_context_whitelist_ranges(void) +{ + return gv11b_global_whitelist_ranges; +} + +int 
gv11b_get_context_whitelist_ranges_count(void) +{ + return gv11b_global_whitelist_ranges_count; +} + +const u32 *gv11b_get_runcontrol_whitelist(void) +{ + return gv11b_runcontrol_whitelist; +} + +int gv11b_get_runcontrol_whitelist_count(void) +{ + return gv11b_runcontrol_whitelist_count; +} + +const struct regop_offset_range *gv11b_get_runcontrol_whitelist_ranges(void) +{ + return gv11b_runcontrol_whitelist_ranges; +} + +int gv11b_get_runcontrol_whitelist_ranges_count(void) +{ + return gv11b_runcontrol_whitelist_ranges_count; +} + +const u32 *gv11b_get_qctl_whitelist(void) +{ + return gv11b_qctl_whitelist; +} + +int gv11b_get_qctl_whitelist_count(void) +{ + return gv11b_qctl_whitelist_count; +} + +const struct regop_offset_range *gv11b_get_qctl_whitelist_ranges(void) +{ + return gv11b_qctl_whitelist_ranges; +} + +int gv11b_get_qctl_whitelist_ranges_count(void) +{ + return gv11b_qctl_whitelist_ranges_count; +} + +int gv11b_apply_smpc_war(struct dbg_session_gk20a *dbg_s) +{ + /* Not needed on gv11b */ + return 0; +} diff --git a/drivers/gpu/nvgpu/gv11b/regops_gv11b.h b/drivers/gpu/nvgpu/gv11b/regops_gv11b.h new file mode 100644 index 000000000..0ee2edfeb --- /dev/null +++ b/drivers/gpu/nvgpu/gv11b/regops_gv11b.h @@ -0,0 +1,42 @@ +/* + * + * Tegra GV11B GPU Driver Register Ops + * + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef __REGOPS_GV11B_H_ +#define __REGOPS_GV11B_H_ + +const struct regop_offset_range *gv11b_get_global_whitelist_ranges(void); +int gv11b_get_global_whitelist_ranges_count(void); +const struct regop_offset_range *gv11b_get_context_whitelist_ranges(void); +int gv11b_get_context_whitelist_ranges_count(void); +const u32 *gv11b_get_runcontrol_whitelist(void); +int gv11b_get_runcontrol_whitelist_count(void); +const struct regop_offset_range *gv11b_get_runcontrol_whitelist_ranges(void); +int gv11b_get_runcontrol_whitelist_ranges_count(void); +const u32 *gv11b_get_qctl_whitelist(void); +int gv11b_get_qctl_whitelist_count(void); +const struct regop_offset_range *gv11b_get_qctl_whitelist_ranges(void); +int gv11b_get_qctl_whitelist_ranges_count(void); +int gv11b_apply_smpc_war(struct dbg_session_gk20a *dbg_s); + +#endif /* __REGOPS_GV11B_H_ */ diff --git a/drivers/gpu/nvgpu/gv11b/subctx_gv11b.c b/drivers/gpu/nvgpu/gv11b/subctx_gv11b.c new file mode 100644 index 000000000..fe1aa8a5d --- /dev/null +++ b/drivers/gpu/nvgpu/gv11b/subctx_gv11b.c @@ -0,0 +1,185 @@ +/* + * Volta GPU series Subcontext + * + * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "gk20a/gk20a.h" + +#include "gv11b/subctx_gv11b.h" + +#include +#include +#include + +#include +#include + +static void gv11b_init_subcontext_pdb(struct channel_gk20a *c, + struct nvgpu_mem *inst_block); + +static void gv11b_subctx_commit_valid_mask(struct channel_gk20a *c, + struct nvgpu_mem *inst_block); +static void gv11b_subctx_commit_pdb(struct channel_gk20a *c, + struct nvgpu_mem *inst_block); + +void gv11b_free_subctx_header(struct channel_gk20a *c) +{ + struct ctx_header_desc *ctx = &c->ch_ctx.ctx_header; + struct gk20a *g = c->g; + + nvgpu_log(g, gpu_dbg_fn, "gv11b_free_subctx_header"); + + if (ctx->mem.gpu_va) { + nvgpu_gmmu_unmap(c->vm, &ctx->mem, ctx->mem.gpu_va); + + nvgpu_dma_free(g, &ctx->mem); + } +} + +int gv11b_alloc_subctx_header(struct channel_gk20a *c) +{ + struct ctx_header_desc *ctx = &c->ch_ctx.ctx_header; + struct gk20a *g = c->g; + int ret = 0; + + nvgpu_log(g, gpu_dbg_fn, "gv11b_alloc_subctx_header"); + + if (ctx->mem.gpu_va == 0) { + ret = nvgpu_dma_alloc_flags_sys(g, + 0, /* No Special flags */ + ctxsw_prog_fecs_header_v(), + &ctx->mem); + if (ret) { + nvgpu_err(g, "failed to allocate sub ctx header"); + return ret; + } + ctx->mem.gpu_va = nvgpu_gmmu_map(c->vm, + &ctx->mem, + ctx->mem.size, + 0, /* not GPU-cacheable */ + gk20a_mem_flag_none, true, + ctx->mem.aperture); + if (!ctx->mem.gpu_va) { + nvgpu_err(g, "failed to map ctx header"); + nvgpu_dma_free(g, &ctx->mem); + return -ENOMEM; + } + /* Now clear the buffer */ + if (nvgpu_mem_begin(g, &ctx->mem)) + return -ENOMEM; + + nvgpu_memset(g, &ctx->mem, 0, 0, ctx->mem.size); + nvgpu_mem_end(g, &ctx->mem); + + gv11b_init_subcontext_pdb(c, &c->inst_block); + } + return ret; +} + +static void gv11b_init_subcontext_pdb(struct channel_gk20a *c, + struct nvgpu_mem *inst_block) +{ + struct gk20a *g = c->g; + + gv11b_subctx_commit_pdb(c, inst_block); + gv11b_subctx_commit_valid_mask(c, inst_block); + + nvgpu_log(g, gpu_dbg_info, " subctx %d instblk set", c->t19x.subctx_id); + 
nvgpu_mem_wr32(g, inst_block, ram_in_engine_wfi_veid_w(), + ram_in_engine_wfi_veid_f(c->t19x.subctx_id)); + +} + +int gv11b_update_subctx_header(struct channel_gk20a *c, u64 gpu_va) +{ + struct ctx_header_desc *ctx = &c->ch_ctx.ctx_header; + struct nvgpu_mem *gr_mem; + struct gk20a *g = c->g; + int ret = 0; + u32 addr_lo, addr_hi; + + addr_lo = u64_lo32(gpu_va); + addr_hi = u64_hi32(gpu_va); + + gr_mem = &ctx->mem; + g->ops.mm.l2_flush(g, true); + if (nvgpu_mem_begin(g, gr_mem)) + return -ENOMEM; + + nvgpu_mem_wr(g, gr_mem, + ctxsw_prog_main_image_context_buffer_ptr_hi_o(), addr_hi); + nvgpu_mem_wr(g, gr_mem, + ctxsw_prog_main_image_context_buffer_ptr_o(), addr_lo); + + nvgpu_mem_wr(g, gr_mem, + ctxsw_prog_main_image_ctl_o(), + ctxsw_prog_main_image_ctl_type_per_veid_header_v()); + nvgpu_mem_end(g, gr_mem); + return ret; +} + +void gv11b_subctx_commit_valid_mask(struct channel_gk20a *c, + struct nvgpu_mem *inst_block) +{ + struct gk20a *g = c->g; + + /* Make all subctx pdbs valid */ + nvgpu_mem_wr32(g, inst_block, 166, 0xffffffff); + nvgpu_mem_wr32(g, inst_block, 167, 0xffffffff); +} + +void gv11b_subctx_commit_pdb(struct channel_gk20a *c, + struct nvgpu_mem *inst_block) +{ + struct gk20a *g = c->g; + struct fifo_gk20a *f = &g->fifo; + struct vm_gk20a *vm = c->vm; + u32 lo, hi; + u32 subctx_id = 0; + u32 format_word; + u32 pdb_addr_lo, pdb_addr_hi; + u64 pdb_addr; + u32 aperture = nvgpu_aperture_mask(g, vm->pdb.mem, + ram_in_sc_page_dir_base_target_sys_mem_ncoh_v(), + ram_in_sc_page_dir_base_target_vid_mem_v()); + + pdb_addr = nvgpu_mem_get_addr(g, vm->pdb.mem); + pdb_addr_lo = u64_lo32(pdb_addr >> ram_in_base_shift_v()); + pdb_addr_hi = u64_hi32(pdb_addr); + format_word = ram_in_sc_page_dir_base_target_f( + aperture, 0) | + ram_in_sc_page_dir_base_vol_f( + ram_in_sc_page_dir_base_vol_true_v(), 0) | + ram_in_sc_page_dir_base_fault_replay_tex_f(1, 0) | + ram_in_sc_page_dir_base_fault_replay_gcc_f(1, 0) | + ram_in_sc_use_ver2_pt_format_f(1, 0) | + 
ram_in_sc_big_page_size_f(1, 0) | + ram_in_sc_page_dir_base_lo_0_f(pdb_addr_lo); + nvgpu_log(g, gpu_dbg_info, " pdb info lo %x hi %x", + format_word, pdb_addr_hi); + for (subctx_id = 0; subctx_id < f->t19x.max_subctx_count; subctx_id++) { + lo = ram_in_sc_page_dir_base_vol_0_w() + (4 * subctx_id); + hi = ram_in_sc_page_dir_base_hi_0_w() + (4 * subctx_id); + nvgpu_mem_wr32(g, inst_block, lo, format_word); + nvgpu_mem_wr32(g, inst_block, hi, pdb_addr_hi); + } +} diff --git a/drivers/gpu/nvgpu/gv11b/subctx_gv11b.h b/drivers/gpu/nvgpu/gv11b/subctx_gv11b.h new file mode 100644 index 000000000..10dc0ba5d --- /dev/null +++ b/drivers/gpu/nvgpu/gv11b/subctx_gv11b.h @@ -0,0 +1,34 @@ +/* + * + * Volta GPU series Subcontext + * + * Copyright (c) 2016 - 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#ifndef __SUBCONTEXT_GV11B_H__ +#define __SUBCONTEXT_GV11B_H__ + +int gv11b_alloc_subctx_header(struct channel_gk20a *c); + +void gv11b_free_subctx_header(struct channel_gk20a *c); + +int gv11b_update_subctx_header(struct channel_gk20a *c, u64 gpu_va); + +#endif /* __SUBCONTEXT_GV11B_H__ */ diff --git a/drivers/gpu/nvgpu/gv11b/therm_gv11b.c b/drivers/gpu/nvgpu/gv11b/therm_gv11b.c new file mode 100644 index 000000000..18987119d --- /dev/null +++ b/drivers/gpu/nvgpu/gv11b/therm_gv11b.c @@ -0,0 +1,75 @@ +/* + * GV11B Therm + * + * Copyright (c) 2015-2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "gk20a/gk20a.h" + +#include + +#include + +int gv11b_elcg_init_idle_filters(struct gk20a *g) +{ + u32 gate_ctrl, idle_filter; + u32 engine_id; + u32 active_engine_id = 0; + struct fifo_gk20a *f = &g->fifo; + + if (nvgpu_platform_is_simulation(g)) + return 0; + + gk20a_dbg_info("init clock/power gate reg"); + + for (engine_id = 0; engine_id < f->num_engines; engine_id++) { + active_engine_id = f->active_engines_list[engine_id]; + + gate_ctrl = gk20a_readl(g, therm_gate_ctrl_r(active_engine_id)); + gate_ctrl = set_field(gate_ctrl, + therm_gate_ctrl_eng_idle_filt_exp_m(), + therm_gate_ctrl_eng_idle_filt_exp__prod_f()); + gate_ctrl = set_field(gate_ctrl, + therm_gate_ctrl_eng_idle_filt_mant_m(), + therm_gate_ctrl_eng_idle_filt_mant__prod_f()); + gate_ctrl = set_field(gate_ctrl, + therm_gate_ctrl_eng_delay_before_m(), + therm_gate_ctrl_eng_delay_before__prod_f()); + gate_ctrl = set_field(gate_ctrl, + therm_gate_ctrl_eng_delay_after_m(), + therm_gate_ctrl_eng_delay_after__prod_f()); + gk20a_writel(g, therm_gate_ctrl_r(active_engine_id), gate_ctrl); + } + + idle_filter = gk20a_readl(g, therm_fecs_idle_filter_r()); + idle_filter = set_field(idle_filter, + therm_fecs_idle_filter_value_m(), + therm_fecs_idle_filter_value__prod_f()); + gk20a_writel(g, therm_fecs_idle_filter_r(), idle_filter); + + idle_filter = gk20a_readl(g, therm_hubmmu_idle_filter_r()); + idle_filter = set_field(idle_filter, + therm_hubmmu_idle_filter_value_m(), + therm_hubmmu_idle_filter_value__prod_f()); + gk20a_writel(g, therm_hubmmu_idle_filter_r(), idle_filter); + + return 0; +} diff --git a/drivers/gpu/nvgpu/gv11b/therm_gv11b.h b/drivers/gpu/nvgpu/gv11b/therm_gv11b.h new file mode 100644 index 000000000..1d89597ba --- /dev/null +++ b/drivers/gpu/nvgpu/gv11b/therm_gv11b.h @@ -0,0 +1,28 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef THERM_GV11B_H +#define THERM_GV11B_H + +struct gk20a; +int gv11b_elcg_init_idle_filters(struct gk20a *g); + +#endif /* THERM_GV11B_H */ diff --git a/drivers/gpu/nvgpu/include/nvgpu/enabled_t19x.h b/drivers/gpu/nvgpu/include/nvgpu/enabled_t19x.h new file mode 100644 index 000000000..9ef1dc301 --- /dev/null +++ b/drivers/gpu/nvgpu/include/nvgpu/enabled_t19x.h @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVGPU_ENABLED_T19X_H__ +#define __NVGPU_ENABLED_T19X_H__ + +/* subcontexts are available */ +#define NVGPU_SUPPORT_TSG_SUBCONTEXTS 63 + +#endif diff --git a/drivers/gpu/nvgpu/include/nvgpu/gmmu_t19x.h b/drivers/gpu/nvgpu/include/nvgpu/gmmu_t19x.h new file mode 100644 index 000000000..eea51fbba --- /dev/null +++ b/drivers/gpu/nvgpu/include/nvgpu/gmmu_t19x.h @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVGPU_GMMU_T19X_H__ +#define __NVGPU_GMMU_T19X_H__ + +struct nvgpu_gmmu_attrs; + +struct nvgpu_gmmu_attrs_t19x { + bool l3_alloc; +}; + +void nvgpu_gmmu_add_t19x_attrs(struct nvgpu_gmmu_attrs *attrs, u32 flags); + +#endif diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_bus_gv100.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_bus_gv100.h new file mode 100644 index 000000000..7771f1ea1 --- /dev/null +++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_bus_gv100.h @@ -0,0 +1,227 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/* + * Function naming determines intended use: + * + * _r(void) : Returns the offset for register . + * + * _o(void) : Returns the offset for element . + * + * _w(void) : Returns the word offset for word (4 byte) element . + * + * __s(void) : Returns size of field of register in bits. + * + * __f(u32 v) : Returns a value based on 'v' which has been shifted + * and masked to place it at field of register . This value + * can be |'d with others to produce a full register value for + * register . + * + * __m(void) : Returns a mask for field of register . This + * value can be ~'d and then &'d to clear the value of field for + * register . + * + * ___f(void) : Returns the constant value after being shifted + * to place it at field of register . This value can be |'d + * with others to produce a full register value for . 
+ * + * __v(u32 r) : Returns the value of field from a full register + * value 'r' after being shifted to place its LSB at bit 0. + * This value is suitable for direct comparison with other unshifted + * values appropriate for use in field of register . + * + * ___v(void) : Returns the constant value for defined for + * field of register . This value is suitable for direct + * comparison with unshifted values appropriate for use in field + * of register . + */ +#ifndef _hw_bus_gv100_h_ +#define _hw_bus_gv100_h_ + +static inline u32 bus_sw_scratch_r(u32 i) +{ + return 0x00001580U + i*4U; +} +static inline u32 bus_bar0_window_r(void) +{ + return 0x00001700U; +} +static inline u32 bus_bar0_window_base_f(u32 v) +{ + return (v & 0xffffffU) << 0U; +} +static inline u32 bus_bar0_window_target_vid_mem_f(void) +{ + return 0x0U; +} +static inline u32 bus_bar0_window_target_sys_mem_coherent_f(void) +{ + return 0x2000000U; +} +static inline u32 bus_bar0_window_target_sys_mem_noncoherent_f(void) +{ + return 0x3000000U; +} +static inline u32 bus_bar0_window_target_bar0_window_base_shift_v(void) +{ + return 0x00000010U; +} +static inline u32 bus_bar1_block_r(void) +{ + return 0x00001704U; +} +static inline u32 bus_bar1_block_ptr_f(u32 v) +{ + return (v & 0xfffffffU) << 0U; +} +static inline u32 bus_bar1_block_target_vid_mem_f(void) +{ + return 0x0U; +} +static inline u32 bus_bar1_block_target_sys_mem_coh_f(void) +{ + return 0x20000000U; +} +static inline u32 bus_bar1_block_target_sys_mem_ncoh_f(void) +{ + return 0x30000000U; +} +static inline u32 bus_bar1_block_mode_virtual_f(void) +{ + return 0x80000000U; +} +static inline u32 bus_bar2_block_r(void) +{ + return 0x00001714U; +} +static inline u32 bus_bar2_block_ptr_f(u32 v) +{ + return (v & 0xfffffffU) << 0U; +} +static inline u32 bus_bar2_block_target_vid_mem_f(void) +{ + return 0x0U; +} +static inline u32 bus_bar2_block_target_sys_mem_coh_f(void) +{ + return 0x20000000U; +} +static inline u32 
bus_bar2_block_target_sys_mem_ncoh_f(void) +{ + return 0x30000000U; +} +static inline u32 bus_bar2_block_mode_virtual_f(void) +{ + return 0x80000000U; +} +static inline u32 bus_bar1_block_ptr_shift_v(void) +{ + return 0x0000000cU; +} +static inline u32 bus_bar2_block_ptr_shift_v(void) +{ + return 0x0000000cU; +} +static inline u32 bus_bind_status_r(void) +{ + return 0x00001710U; +} +static inline u32 bus_bind_status_bar1_pending_v(u32 r) +{ + return (r >> 0U) & 0x1U; +} +static inline u32 bus_bind_status_bar1_pending_empty_f(void) +{ + return 0x0U; +} +static inline u32 bus_bind_status_bar1_pending_busy_f(void) +{ + return 0x1U; +} +static inline u32 bus_bind_status_bar1_outstanding_v(u32 r) +{ + return (r >> 1U) & 0x1U; +} +static inline u32 bus_bind_status_bar1_outstanding_false_f(void) +{ + return 0x0U; +} +static inline u32 bus_bind_status_bar1_outstanding_true_f(void) +{ + return 0x2U; +} +static inline u32 bus_bind_status_bar2_pending_v(u32 r) +{ + return (r >> 2U) & 0x1U; +} +static inline u32 bus_bind_status_bar2_pending_empty_f(void) +{ + return 0x0U; +} +static inline u32 bus_bind_status_bar2_pending_busy_f(void) +{ + return 0x4U; +} +static inline u32 bus_bind_status_bar2_outstanding_v(u32 r) +{ + return (r >> 3U) & 0x1U; +} +static inline u32 bus_bind_status_bar2_outstanding_false_f(void) +{ + return 0x0U; +} +static inline u32 bus_bind_status_bar2_outstanding_true_f(void) +{ + return 0x8U; +} +static inline u32 bus_intr_0_r(void) +{ + return 0x00001100U; +} +static inline u32 bus_intr_0_pri_squash_m(void) +{ + return 0x1U << 1U; +} +static inline u32 bus_intr_0_pri_fecserr_m(void) +{ + return 0x1U << 2U; +} +static inline u32 bus_intr_0_pri_timeout_m(void) +{ + return 0x1U << 3U; +} +static inline u32 bus_intr_en_0_r(void) +{ + return 0x00001140U; +} +static inline u32 bus_intr_en_0_pri_squash_m(void) +{ + return 0x1U << 1U; +} +static inline u32 bus_intr_en_0_pri_fecserr_m(void) +{ + return 0x1U << 2U; +} +static inline u32 
bus_intr_en_0_pri_timeout_m(void) +{ + return 0x1U << 3U; +} +#endif diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_ccsr_gv100.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_ccsr_gv100.h new file mode 100644 index 000000000..b1478037c --- /dev/null +++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_ccsr_gv100.h @@ -0,0 +1,187 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/* + * Function naming determines intended use: + * + * _r(void) : Returns the offset for register . + * + * _o(void) : Returns the offset for element . + * + * _w(void) : Returns the word offset for word (4 byte) element . + * + * __s(void) : Returns size of field of register in bits. + * + * __f(u32 v) : Returns a value based on 'v' which has been shifted + * and masked to place it at field of register . 
This value + * can be |'d with others to produce a full register value for + * register . + * + * __m(void) : Returns a mask for field of register . This + * value can be ~'d and then &'d to clear the value of field for + * register . + * + * ___f(void) : Returns the constant value after being shifted + * to place it at field of register . This value can be |'d + * with others to produce a full register value for . + * + * __v(u32 r) : Returns the value of field from a full register + * value 'r' after being shifted to place its LSB at bit 0. + * This value is suitable for direct comparison with other unshifted + * values appropriate for use in field of register . + * + * ___v(void) : Returns the constant value for defined for + * field of register . This value is suitable for direct + * comparison with unshifted values appropriate for use in field + * of register . + */ +#ifndef _hw_ccsr_gv100_h_ +#define _hw_ccsr_gv100_h_ + +static inline u32 ccsr_channel_inst_r(u32 i) +{ + return 0x00800000U + i*8U; +} +static inline u32 ccsr_channel_inst__size_1_v(void) +{ + return 0x00001000U; +} +static inline u32 ccsr_channel_inst_ptr_f(u32 v) +{ + return (v & 0xfffffffU) << 0U; +} +static inline u32 ccsr_channel_inst_target_vid_mem_f(void) +{ + return 0x0U; +} +static inline u32 ccsr_channel_inst_target_sys_mem_coh_f(void) +{ + return 0x20000000U; +} +static inline u32 ccsr_channel_inst_target_sys_mem_ncoh_f(void) +{ + return 0x30000000U; +} +static inline u32 ccsr_channel_inst_bind_false_f(void) +{ + return 0x0U; +} +static inline u32 ccsr_channel_inst_bind_true_f(void) +{ + return 0x80000000U; +} +static inline u32 ccsr_channel_r(u32 i) +{ + return 0x00800004U + i*8U; +} +static inline u32 ccsr_channel__size_1_v(void) +{ + return 0x00001000U; +} +static inline u32 ccsr_channel_enable_v(u32 r) +{ + return (r >> 0U) & 0x1U; +} +static inline u32 ccsr_channel_enable_set_f(u32 v) +{ + return (v & 0x1U) << 10U; +} +static inline u32 ccsr_channel_enable_set_true_f(void) +{ + 
return 0x400U; +} +static inline u32 ccsr_channel_enable_clr_true_f(void) +{ + return 0x800U; +} +static inline u32 ccsr_channel_status_v(u32 r) +{ + return (r >> 24U) & 0xfU; +} +static inline u32 ccsr_channel_status_pending_ctx_reload_v(void) +{ + return 0x00000002U; +} +static inline u32 ccsr_channel_status_pending_acq_ctx_reload_v(void) +{ + return 0x00000004U; +} +static inline u32 ccsr_channel_status_on_pbdma_ctx_reload_v(void) +{ + return 0x0000000aU; +} +static inline u32 ccsr_channel_status_on_pbdma_and_eng_ctx_reload_v(void) +{ + return 0x0000000bU; +} +static inline u32 ccsr_channel_status_on_eng_ctx_reload_v(void) +{ + return 0x0000000cU; +} +static inline u32 ccsr_channel_status_on_eng_pending_ctx_reload_v(void) +{ + return 0x0000000dU; +} +static inline u32 ccsr_channel_status_on_eng_pending_acq_ctx_reload_v(void) +{ + return 0x0000000eU; +} +static inline u32 ccsr_channel_next_v(u32 r) +{ + return (r >> 1U) & 0x1U; +} +static inline u32 ccsr_channel_next_true_v(void) +{ + return 0x00000001U; +} +static inline u32 ccsr_channel_force_ctx_reload_true_f(void) +{ + return 0x100U; +} +static inline u32 ccsr_channel_pbdma_faulted_f(u32 v) +{ + return (v & 0x1U) << 22U; +} +static inline u32 ccsr_channel_pbdma_faulted_reset_f(void) +{ + return 0x400000U; +} +static inline u32 ccsr_channel_eng_faulted_f(u32 v) +{ + return (v & 0x1U) << 23U; +} +static inline u32 ccsr_channel_eng_faulted_v(u32 r) +{ + return (r >> 23U) & 0x1U; +} +static inline u32 ccsr_channel_eng_faulted_reset_f(void) +{ + return 0x800000U; +} +static inline u32 ccsr_channel_eng_faulted_true_v(void) +{ + return 0x00000001U; +} +static inline u32 ccsr_channel_busy_v(u32 r) +{ + return (r >> 28U) & 0x1U; +} +#endif diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_ce_gv100.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_ce_gv100.h new file mode 100644 index 000000000..18b5fc664 --- /dev/null +++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_ce_gv100.h @@ -0,0 +1,107 @@ +/* + * Copyright 
(c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/* + * Function naming determines intended use: + * + * _r(void) : Returns the offset for register . + * + * _o(void) : Returns the offset for element . + * + * _w(void) : Returns the word offset for word (4 byte) element . + * + * __s(void) : Returns size of field of register in bits. + * + * __f(u32 v) : Returns a value based on 'v' which has been shifted + * and masked to place it at field of register . This value + * can be |'d with others to produce a full register value for + * register . + * + * __m(void) : Returns a mask for field of register . This + * value can be ~'d and then &'d to clear the value of field for + * register . + * + * ___f(void) : Returns the constant value after being shifted + * to place it at field of register . This value can be |'d + * with others to produce a full register value for . 
+ * + * __v(u32 r) : Returns the value of field from a full register + * value 'r' after being shifted to place its LSB at bit 0. + * This value is suitable for direct comparison with other unshifted + * values appropriate for use in field of register . + * + * ___v(void) : Returns the constant value for defined for + * field of register . This value is suitable for direct + * comparison with unshifted values appropriate for use in field + * of register . + */ +#ifndef _hw_ce_gv100_h_ +#define _hw_ce_gv100_h_ + +static inline u32 ce_intr_status_r(u32 i) +{ + return 0x00104410U + i*128U; +} +static inline u32 ce_intr_status_blockpipe_pending_f(void) +{ + return 0x1U; +} +static inline u32 ce_intr_status_blockpipe_reset_f(void) +{ + return 0x1U; +} +static inline u32 ce_intr_status_nonblockpipe_pending_f(void) +{ + return 0x2U; +} +static inline u32 ce_intr_status_nonblockpipe_reset_f(void) +{ + return 0x2U; +} +static inline u32 ce_intr_status_launcherr_pending_f(void) +{ + return 0x4U; +} +static inline u32 ce_intr_status_launcherr_reset_f(void) +{ + return 0x4U; +} +static inline u32 ce_intr_status_invalid_config_pending_f(void) +{ + return 0x8U; +} +static inline u32 ce_intr_status_invalid_config_reset_f(void) +{ + return 0x8U; +} +static inline u32 ce_intr_status_mthd_buffer_fault_pending_f(void) +{ + return 0x10U; +} +static inline u32 ce_intr_status_mthd_buffer_fault_reset_f(void) +{ + return 0x10U; +} +static inline u32 ce_pce_map_r(void) +{ + return 0x00104028U; +} +#endif diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_ctxsw_prog_gv100.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_ctxsw_prog_gv100.h new file mode 100644 index 000000000..cd7928353 --- /dev/null +++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_ctxsw_prog_gv100.h @@ -0,0 +1,455 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/* + * Function naming determines intended use: + * + * _r(void) : Returns the offset for register . + * + * _o(void) : Returns the offset for element . + * + * _w(void) : Returns the word offset for word (4 byte) element . + * + * __s(void) : Returns size of field of register in bits. + * + * __f(u32 v) : Returns a value based on 'v' which has been shifted + * and masked to place it at field of register . This value + * can be |'d with others to produce a full register value for + * register . + * + * __m(void) : Returns a mask for field of register . This + * value can be ~'d and then &'d to clear the value of field for + * register . + * + * ___f(void) : Returns the constant value after being shifted + * to place it at field of register . This value can be |'d + * with others to produce a full register value for . 
+ * + * __v(u32 r) : Returns the value of field from a full register + * value 'r' after being shifted to place its LSB at bit 0. + * This value is suitable for direct comparison with other unshifted + * values appropriate for use in field of register . + * + * ___v(void) : Returns the constant value for defined for + * field of register . This value is suitable for direct + * comparison with unshifted values appropriate for use in field + * of register . + */ +#ifndef _hw_ctxsw_prog_gv100_h_ +#define _hw_ctxsw_prog_gv100_h_ + +static inline u32 ctxsw_prog_fecs_header_v(void) +{ + return 0x00000100U; +} +static inline u32 ctxsw_prog_main_image_num_gpcs_o(void) +{ + return 0x00000008U; +} +static inline u32 ctxsw_prog_main_image_ctl_o(void) +{ + return 0x0000000cU; +} +static inline u32 ctxsw_prog_main_image_ctl_type_f(u32 v) +{ + return (v & 0x3fU) << 0U; +} +static inline u32 ctxsw_prog_main_image_ctl_type_undefined_v(void) +{ + return 0x00000000U; +} +static inline u32 ctxsw_prog_main_image_ctl_type_opengl_v(void) +{ + return 0x00000008U; +} +static inline u32 ctxsw_prog_main_image_ctl_type_dx9_v(void) +{ + return 0x00000010U; +} +static inline u32 ctxsw_prog_main_image_ctl_type_dx10_v(void) +{ + return 0x00000011U; +} +static inline u32 ctxsw_prog_main_image_ctl_type_dx11_v(void) +{ + return 0x00000012U; +} +static inline u32 ctxsw_prog_main_image_ctl_type_compute_v(void) +{ + return 0x00000020U; +} +static inline u32 ctxsw_prog_main_image_ctl_type_per_veid_header_v(void) +{ + return 0x00000021U; +} +static inline u32 ctxsw_prog_main_image_patch_count_o(void) +{ + return 0x00000010U; +} +static inline u32 ctxsw_prog_main_image_context_id_o(void) +{ + return 0x000000f0U; +} +static inline u32 ctxsw_prog_main_image_patch_adr_lo_o(void) +{ + return 0x00000014U; +} +static inline u32 ctxsw_prog_main_image_patch_adr_hi_o(void) +{ + return 0x00000018U; +} +static inline u32 ctxsw_prog_main_image_zcull_o(void) +{ + return 0x0000001cU; +} +static inline u32 
ctxsw_prog_main_image_zcull_mode_no_ctxsw_v(void) +{ + return 0x00000001U; +} +static inline u32 ctxsw_prog_main_image_zcull_mode_separate_buffer_v(void) +{ + return 0x00000002U; +} +static inline u32 ctxsw_prog_main_image_zcull_ptr_o(void) +{ + return 0x00000020U; +} +static inline u32 ctxsw_prog_main_image_pm_o(void) +{ + return 0x00000028U; +} +static inline u32 ctxsw_prog_main_image_pm_mode_m(void) +{ + return 0x7U << 0U; +} +static inline u32 ctxsw_prog_main_image_pm_mode_no_ctxsw_f(void) +{ + return 0x0U; +} +static inline u32 ctxsw_prog_main_image_pm_smpc_mode_m(void) +{ + return 0x7U << 3U; +} +static inline u32 ctxsw_prog_main_image_pm_smpc_mode_ctxsw_f(void) +{ + return 0x8U; +} +static inline u32 ctxsw_prog_main_image_pm_smpc_mode_no_ctxsw_f(void) +{ + return 0x0U; +} +static inline u32 ctxsw_prog_main_image_pm_ptr_o(void) +{ + return 0x0000002cU; +} +static inline u32 ctxsw_prog_main_image_num_save_ops_o(void) +{ + return 0x000000f4U; +} +static inline u32 ctxsw_prog_main_image_num_wfi_save_ops_o(void) +{ + return 0x000000d0U; +} +static inline u32 ctxsw_prog_main_image_num_cta_save_ops_o(void) +{ + return 0x000000d4U; +} +static inline u32 ctxsw_prog_main_image_num_gfxp_save_ops_o(void) +{ + return 0x000000d8U; +} +static inline u32 ctxsw_prog_main_image_num_cilp_save_ops_o(void) +{ + return 0x000000dcU; +} +static inline u32 ctxsw_prog_main_image_num_restore_ops_o(void) +{ + return 0x000000f8U; +} +static inline u32 ctxsw_prog_main_image_zcull_ptr_hi_o(void) +{ + return 0x00000060U; +} +static inline u32 ctxsw_prog_main_image_zcull_ptr_hi_v_f(u32 v) +{ + return (v & 0x1ffffU) << 0U; +} +static inline u32 ctxsw_prog_main_image_pm_ptr_hi_o(void) +{ + return 0x00000094U; +} +static inline u32 ctxsw_prog_main_image_full_preemption_ptr_hi_o(void) +{ + return 0x00000064U; +} +static inline u32 ctxsw_prog_main_image_full_preemption_ptr_hi_v_f(u32 v) +{ + return (v & 0x1ffffU) << 0U; +} +static inline u32 ctxsw_prog_main_image_full_preemption_ptr_o(void) +{ + 
return 0x00000068U; +} +static inline u32 ctxsw_prog_main_image_full_preemption_ptr_v_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 ctxsw_prog_main_image_full_preemption_ptr_veid0_hi_o(void) +{ + return 0x00000070U; +} +static inline u32 ctxsw_prog_main_image_full_preemption_ptr_veid0_hi_v_f(u32 v) +{ + return (v & 0x1ffffU) << 0U; +} +static inline u32 ctxsw_prog_main_image_full_preemption_ptr_veid0_o(void) +{ + return 0x00000074U; +} +static inline u32 ctxsw_prog_main_image_full_preemption_ptr_veid0_v_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 ctxsw_prog_main_image_context_buffer_ptr_hi_o(void) +{ + return 0x00000078U; +} +static inline u32 ctxsw_prog_main_image_context_buffer_ptr_hi_v_f(u32 v) +{ + return (v & 0x1ffffU) << 0U; +} +static inline u32 ctxsw_prog_main_image_context_buffer_ptr_o(void) +{ + return 0x0000007cU; +} +static inline u32 ctxsw_prog_main_image_context_buffer_ptr_v_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 ctxsw_prog_main_image_magic_value_o(void) +{ + return 0x000000fcU; +} +static inline u32 ctxsw_prog_main_image_magic_value_v_value_v(void) +{ + return 0x600dc0deU; +} +static inline u32 ctxsw_prog_local_priv_register_ctl_o(void) +{ + return 0x0000000cU; +} +static inline u32 ctxsw_prog_local_priv_register_ctl_offset_v(u32 r) +{ + return (r >> 0U) & 0xffffU; +} +static inline u32 ctxsw_prog_main_image_global_cb_ptr_o(void) +{ + return 0x000000b8U; +} +static inline u32 ctxsw_prog_main_image_global_cb_ptr_v_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 ctxsw_prog_main_image_global_cb_ptr_hi_o(void) +{ + return 0x000000bcU; +} +static inline u32 ctxsw_prog_main_image_global_cb_ptr_hi_v_f(u32 v) +{ + return (v & 0x1ffffU) << 0U; +} +static inline u32 ctxsw_prog_main_image_global_pagepool_ptr_o(void) +{ + return 0x000000c0U; +} +static inline u32 ctxsw_prog_main_image_global_pagepool_ptr_v_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline 
u32 ctxsw_prog_main_image_global_pagepool_ptr_hi_o(void) +{ + return 0x000000c4U; +} +static inline u32 ctxsw_prog_main_image_global_pagepool_ptr_hi_v_f(u32 v) +{ + return (v & 0x1ffffU) << 0U; +} +static inline u32 ctxsw_prog_main_image_control_block_ptr_o(void) +{ + return 0x000000c8U; +} +static inline u32 ctxsw_prog_main_image_control_block_ptr_v_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 ctxsw_prog_main_image_control_block_ptr_hi_o(void) +{ + return 0x000000ccU; +} +static inline u32 ctxsw_prog_main_image_control_block_ptr_hi_v_f(u32 v) +{ + return (v & 0x1ffffU) << 0U; +} +static inline u32 ctxsw_prog_main_image_context_ramchain_buffer_addr_lo_o(void) +{ + return 0x000000e0U; +} +static inline u32 ctxsw_prog_main_image_context_ramchain_buffer_addr_lo_v_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 ctxsw_prog_main_image_context_ramchain_buffer_addr_hi_o(void) +{ + return 0x000000e4U; +} +static inline u32 ctxsw_prog_main_image_context_ramchain_buffer_addr_hi_v_f(u32 v) +{ + return (v & 0x1ffffU) << 0U; +} +static inline u32 ctxsw_prog_local_image_ppc_info_o(void) +{ + return 0x000000f4U; +} +static inline u32 ctxsw_prog_local_image_ppc_info_num_ppcs_v(u32 r) +{ + return (r >> 0U) & 0xffffU; +} +static inline u32 ctxsw_prog_local_image_ppc_info_ppc_mask_v(u32 r) +{ + return (r >> 16U) & 0xffffU; +} +static inline u32 ctxsw_prog_local_image_num_tpcs_o(void) +{ + return 0x000000f8U; +} +static inline u32 ctxsw_prog_local_magic_value_o(void) +{ + return 0x000000fcU; +} +static inline u32 ctxsw_prog_local_magic_value_v_value_v(void) +{ + return 0xad0becabU; +} +static inline u32 ctxsw_prog_main_extended_buffer_ctl_o(void) +{ + return 0x000000ecU; +} +static inline u32 ctxsw_prog_main_extended_buffer_ctl_offset_v(u32 r) +{ + return (r >> 0U) & 0xffffU; +} +static inline u32 ctxsw_prog_main_extended_buffer_ctl_size_v(u32 r) +{ + return (r >> 16U) & 0xffU; +} +static inline u32 
ctxsw_prog_extended_buffer_segments_size_in_bytes_v(void) +{ + return 0x00000100U; +} +static inline u32 ctxsw_prog_extended_marker_size_in_bytes_v(void) +{ + return 0x00000004U; +} +static inline u32 ctxsw_prog_extended_sm_dsm_perf_counter_register_stride_v(void) +{ + return 0x00000000U; +} +static inline u32 ctxsw_prog_extended_sm_dsm_perf_counter_control_register_stride_v(void) +{ + return 0x00000002U; +} +static inline u32 ctxsw_prog_main_image_priv_access_map_config_o(void) +{ + return 0x000000a0U; +} +static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_s(void) +{ + return 2U; +} +static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_f(u32 v) +{ + return (v & 0x3U) << 0U; +} +static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_m(void) +{ + return 0x3U << 0U; +} +static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_v(u32 r) +{ + return (r >> 0U) & 0x3U; +} +static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_allow_all_f(void) +{ + return 0x0U; +} +static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_use_map_f(void) +{ + return 0x2U; +} +static inline u32 ctxsw_prog_main_image_priv_access_map_addr_lo_o(void) +{ + return 0x000000a4U; +} +static inline u32 ctxsw_prog_main_image_priv_access_map_addr_hi_o(void) +{ + return 0x000000a8U; +} +static inline u32 ctxsw_prog_main_image_misc_options_o(void) +{ + return 0x0000003cU; +} +static inline u32 ctxsw_prog_main_image_misc_options_verif_features_m(void) +{ + return 0x1U << 3U; +} +static inline u32 ctxsw_prog_main_image_misc_options_verif_features_disabled_f(void) +{ + return 0x0U; +} +static inline u32 ctxsw_prog_main_image_graphics_preemption_options_o(void) +{ + return 0x00000080U; +} +static inline u32 ctxsw_prog_main_image_graphics_preemption_options_control_f(u32 v) +{ + return (v & 0x3U) << 0U; +} +static inline u32 ctxsw_prog_main_image_graphics_preemption_options_control_gfxp_f(void) +{ + return 0x1U; +} +static 
inline u32 ctxsw_prog_main_image_compute_preemption_options_o(void) +{ + return 0x00000084U; +} +static inline u32 ctxsw_prog_main_image_compute_preemption_options_control_f(u32 v) +{ + return (v & 0x3U) << 0U; +} +static inline u32 ctxsw_prog_main_image_compute_preemption_options_control_cta_f(void) +{ + return 0x1U; +} +static inline u32 ctxsw_prog_main_image_compute_preemption_options_control_cilp_f(void) +{ + return 0x2U; +} +#endif diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_falcon_gv100.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_falcon_gv100.h new file mode 100644 index 000000000..122956bb1 --- /dev/null +++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_falcon_gv100.h @@ -0,0 +1,599 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/* + * Function naming determines intended use: + * + * _r(void) : Returns the offset for register . 
+ * + * _o(void) : Returns the offset for element . + * + * _w(void) : Returns the word offset for word (4 byte) element . + * + * __s(void) : Returns size of field of register in bits. + * + * __f(u32 v) : Returns a value based on 'v' which has been shifted + * and masked to place it at field of register . This value + * can be |'d with others to produce a full register value for + * register . + * + * __m(void) : Returns a mask for field of register . This + * value can be ~'d and then &'d to clear the value of field for + * register . + * + * ___f(void) : Returns the constant value after being shifted + * to place it at field of register . This value can be |'d + * with others to produce a full register value for . + * + * __v(u32 r) : Returns the value of field from a full register + * value 'r' after being shifted to place its LSB at bit 0. + * This value is suitable for direct comparison with other unshifted + * values appropriate for use in field of register . + * + * ___v(void) : Returns the constant value for defined for + * field of register . This value is suitable for direct + * comparison with unshifted values appropriate for use in field + * of register . 
+ */ +#ifndef _hw_falcon_gv100_h_ +#define _hw_falcon_gv100_h_ + +static inline u32 falcon_falcon_irqsset_r(void) +{ + return 0x00000000U; +} +static inline u32 falcon_falcon_irqsset_swgen0_set_f(void) +{ + return 0x40U; +} +static inline u32 falcon_falcon_irqsclr_r(void) +{ + return 0x00000004U; +} +static inline u32 falcon_falcon_irqstat_r(void) +{ + return 0x00000008U; +} +static inline u32 falcon_falcon_irqstat_halt_true_f(void) +{ + return 0x10U; +} +static inline u32 falcon_falcon_irqstat_exterr_true_f(void) +{ + return 0x20U; +} +static inline u32 falcon_falcon_irqstat_swgen0_true_f(void) +{ + return 0x40U; +} +static inline u32 falcon_falcon_irqmode_r(void) +{ + return 0x0000000cU; +} +static inline u32 falcon_falcon_irqmset_r(void) +{ + return 0x00000010U; +} +static inline u32 falcon_falcon_irqmset_gptmr_f(u32 v) +{ + return (v & 0x1U) << 0U; +} +static inline u32 falcon_falcon_irqmset_wdtmr_f(u32 v) +{ + return (v & 0x1U) << 1U; +} +static inline u32 falcon_falcon_irqmset_mthd_f(u32 v) +{ + return (v & 0x1U) << 2U; +} +static inline u32 falcon_falcon_irqmset_ctxsw_f(u32 v) +{ + return (v & 0x1U) << 3U; +} +static inline u32 falcon_falcon_irqmset_halt_f(u32 v) +{ + return (v & 0x1U) << 4U; +} +static inline u32 falcon_falcon_irqmset_exterr_f(u32 v) +{ + return (v & 0x1U) << 5U; +} +static inline u32 falcon_falcon_irqmset_swgen0_f(u32 v) +{ + return (v & 0x1U) << 6U; +} +static inline u32 falcon_falcon_irqmset_swgen1_f(u32 v) +{ + return (v & 0x1U) << 7U; +} +static inline u32 falcon_falcon_irqmclr_r(void) +{ + return 0x00000014U; +} +static inline u32 falcon_falcon_irqmclr_gptmr_f(u32 v) +{ + return (v & 0x1U) << 0U; +} +static inline u32 falcon_falcon_irqmclr_wdtmr_f(u32 v) +{ + return (v & 0x1U) << 1U; +} +static inline u32 falcon_falcon_irqmclr_mthd_f(u32 v) +{ + return (v & 0x1U) << 2U; +} +static inline u32 falcon_falcon_irqmclr_ctxsw_f(u32 v) +{ + return (v & 0x1U) << 3U; +} +static inline u32 falcon_falcon_irqmclr_halt_f(u32 v) +{ + return (v & 
0x1U) << 4U; +} +static inline u32 falcon_falcon_irqmclr_exterr_f(u32 v) +{ + return (v & 0x1U) << 5U; +} +static inline u32 falcon_falcon_irqmclr_swgen0_f(u32 v) +{ + return (v & 0x1U) << 6U; +} +static inline u32 falcon_falcon_irqmclr_swgen1_f(u32 v) +{ + return (v & 0x1U) << 7U; +} +static inline u32 falcon_falcon_irqmclr_ext_f(u32 v) +{ + return (v & 0xffU) << 8U; +} +static inline u32 falcon_falcon_irqmask_r(void) +{ + return 0x00000018U; +} +static inline u32 falcon_falcon_irqdest_r(void) +{ + return 0x0000001cU; +} +static inline u32 falcon_falcon_irqdest_host_gptmr_f(u32 v) +{ + return (v & 0x1U) << 0U; +} +static inline u32 falcon_falcon_irqdest_host_wdtmr_f(u32 v) +{ + return (v & 0x1U) << 1U; +} +static inline u32 falcon_falcon_irqdest_host_mthd_f(u32 v) +{ + return (v & 0x1U) << 2U; +} +static inline u32 falcon_falcon_irqdest_host_ctxsw_f(u32 v) +{ + return (v & 0x1U) << 3U; +} +static inline u32 falcon_falcon_irqdest_host_halt_f(u32 v) +{ + return (v & 0x1U) << 4U; +} +static inline u32 falcon_falcon_irqdest_host_exterr_f(u32 v) +{ + return (v & 0x1U) << 5U; +} +static inline u32 falcon_falcon_irqdest_host_swgen0_f(u32 v) +{ + return (v & 0x1U) << 6U; +} +static inline u32 falcon_falcon_irqdest_host_swgen1_f(u32 v) +{ + return (v & 0x1U) << 7U; +} +static inline u32 falcon_falcon_irqdest_host_ext_f(u32 v) +{ + return (v & 0xffU) << 8U; +} +static inline u32 falcon_falcon_irqdest_target_gptmr_f(u32 v) +{ + return (v & 0x1U) << 16U; +} +static inline u32 falcon_falcon_irqdest_target_wdtmr_f(u32 v) +{ + return (v & 0x1U) << 17U; +} +static inline u32 falcon_falcon_irqdest_target_mthd_f(u32 v) +{ + return (v & 0x1U) << 18U; +} +static inline u32 falcon_falcon_irqdest_target_ctxsw_f(u32 v) +{ + return (v & 0x1U) << 19U; +} +static inline u32 falcon_falcon_irqdest_target_halt_f(u32 v) +{ + return (v & 0x1U) << 20U; +} +static inline u32 falcon_falcon_irqdest_target_exterr_f(u32 v) +{ + return (v & 0x1U) << 21U; +} +static inline u32 
falcon_falcon_irqdest_target_swgen0_f(u32 v) +{ + return (v & 0x1U) << 22U; +} +static inline u32 falcon_falcon_irqdest_target_swgen1_f(u32 v) +{ + return (v & 0x1U) << 23U; +} +static inline u32 falcon_falcon_irqdest_target_ext_f(u32 v) +{ + return (v & 0xffU) << 24U; +} +static inline u32 falcon_falcon_curctx_r(void) +{ + return 0x00000050U; +} +static inline u32 falcon_falcon_nxtctx_r(void) +{ + return 0x00000054U; +} +static inline u32 falcon_falcon_mailbox0_r(void) +{ + return 0x00000040U; +} +static inline u32 falcon_falcon_mailbox1_r(void) +{ + return 0x00000044U; +} +static inline u32 falcon_falcon_itfen_r(void) +{ + return 0x00000048U; +} +static inline u32 falcon_falcon_itfen_ctxen_enable_f(void) +{ + return 0x1U; +} +static inline u32 falcon_falcon_idlestate_r(void) +{ + return 0x0000004cU; +} +static inline u32 falcon_falcon_idlestate_falcon_busy_v(u32 r) +{ + return (r >> 0U) & 0x1U; +} +static inline u32 falcon_falcon_idlestate_ext_busy_v(u32 r) +{ + return (r >> 1U) & 0x7fffU; +} +static inline u32 falcon_falcon_os_r(void) +{ + return 0x00000080U; +} +static inline u32 falcon_falcon_engctl_r(void) +{ + return 0x000000a4U; +} +static inline u32 falcon_falcon_cpuctl_r(void) +{ + return 0x00000100U; +} +static inline u32 falcon_falcon_cpuctl_startcpu_f(u32 v) +{ + return (v & 0x1U) << 1U; +} +static inline u32 falcon_falcon_cpuctl_sreset_f(u32 v) +{ + return (v & 0x1U) << 2U; +} +static inline u32 falcon_falcon_cpuctl_hreset_f(u32 v) +{ + return (v & 0x1U) << 3U; +} +static inline u32 falcon_falcon_cpuctl_halt_intr_f(u32 v) +{ + return (v & 0x1U) << 4U; +} +static inline u32 falcon_falcon_cpuctl_halt_intr_m(void) +{ + return 0x1U << 4U; +} +static inline u32 falcon_falcon_cpuctl_halt_intr_v(u32 r) +{ + return (r >> 4U) & 0x1U; +} +static inline u32 falcon_falcon_cpuctl_stopped_m(void) +{ + return 0x1U << 5U; +} +static inline u32 falcon_falcon_cpuctl_cpuctl_alias_en_f(u32 v) +{ + return (v & 0x1U) << 6U; +} +static inline u32 
falcon_falcon_cpuctl_cpuctl_alias_en_m(void) +{ + return 0x1U << 6U; +} +static inline u32 falcon_falcon_cpuctl_cpuctl_alias_en_v(u32 r) +{ + return (r >> 6U) & 0x1U; +} +static inline u32 falcon_falcon_cpuctl_alias_r(void) +{ + return 0x00000130U; +} +static inline u32 falcon_falcon_cpuctl_alias_startcpu_f(u32 v) +{ + return (v & 0x1U) << 1U; +} +static inline u32 falcon_falcon_imemc_r(u32 i) +{ + return 0x00000180U + i*16U; +} +static inline u32 falcon_falcon_imemc_offs_f(u32 v) +{ + return (v & 0x3fU) << 2U; +} +static inline u32 falcon_falcon_imemc_blk_f(u32 v) +{ + return (v & 0xffU) << 8U; +} +static inline u32 falcon_falcon_imemc_aincw_f(u32 v) +{ + return (v & 0x1U) << 24U; +} +static inline u32 falcon_falcon_imemd_r(u32 i) +{ + return 0x00000184U + i*16U; +} +static inline u32 falcon_falcon_imemt_r(u32 i) +{ + return 0x00000188U + i*16U; +} +static inline u32 falcon_falcon_sctl_r(void) +{ + return 0x00000240U; +} +static inline u32 falcon_falcon_mmu_phys_sec_r(void) +{ + return 0x00100ce4U; +} +static inline u32 falcon_falcon_bootvec_r(void) +{ + return 0x00000104U; +} +static inline u32 falcon_falcon_bootvec_vec_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 falcon_falcon_dmactl_r(void) +{ + return 0x0000010cU; +} +static inline u32 falcon_falcon_dmactl_dmem_scrubbing_m(void) +{ + return 0x1U << 1U; +} +static inline u32 falcon_falcon_dmactl_imem_scrubbing_m(void) +{ + return 0x1U << 2U; +} +static inline u32 falcon_falcon_dmactl_require_ctx_f(u32 v) +{ + return (v & 0x1U) << 0U; +} +static inline u32 falcon_falcon_hwcfg_r(void) +{ + return 0x00000108U; +} +static inline u32 falcon_falcon_hwcfg_imem_size_v(u32 r) +{ + return (r >> 0U) & 0x1ffU; +} +static inline u32 falcon_falcon_hwcfg_dmem_size_v(u32 r) +{ + return (r >> 9U) & 0x1ffU; +} +static inline u32 falcon_falcon_dmatrfbase_r(void) +{ + return 0x00000110U; +} +static inline u32 falcon_falcon_dmatrfbase1_r(void) +{ + return 0x00000128U; +} +static inline u32 
falcon_falcon_dmatrfmoffs_r(void) +{ + return 0x00000114U; +} +static inline u32 falcon_falcon_dmatrfcmd_r(void) +{ + return 0x00000118U; +} +static inline u32 falcon_falcon_dmatrfcmd_imem_f(u32 v) +{ + return (v & 0x1U) << 4U; +} +static inline u32 falcon_falcon_dmatrfcmd_write_f(u32 v) +{ + return (v & 0x1U) << 5U; +} +static inline u32 falcon_falcon_dmatrfcmd_size_f(u32 v) +{ + return (v & 0x7U) << 8U; +} +static inline u32 falcon_falcon_dmatrfcmd_ctxdma_f(u32 v) +{ + return (v & 0x7U) << 12U; +} +static inline u32 falcon_falcon_dmatrffboffs_r(void) +{ + return 0x0000011cU; +} +static inline u32 falcon_falcon_imctl_debug_r(void) +{ + return 0x0000015cU; +} +static inline u32 falcon_falcon_imctl_debug_addr_blk_f(u32 v) +{ + return (v & 0xffffffU) << 0U; +} +static inline u32 falcon_falcon_imctl_debug_cmd_f(u32 v) +{ + return (v & 0x7U) << 24U; +} +static inline u32 falcon_falcon_imstat_r(void) +{ + return 0x00000144U; +} +static inline u32 falcon_falcon_traceidx_r(void) +{ + return 0x00000148U; +} +static inline u32 falcon_falcon_traceidx_maxidx_v(u32 r) +{ + return (r >> 16U) & 0xffU; +} +static inline u32 falcon_falcon_traceidx_idx_f(u32 v) +{ + return (v & 0xffU) << 0U; +} +static inline u32 falcon_falcon_tracepc_r(void) +{ + return 0x0000014cU; +} +static inline u32 falcon_falcon_tracepc_pc_v(u32 r) +{ + return (r >> 0U) & 0xffffffU; +} +static inline u32 falcon_falcon_exterraddr_r(void) +{ + return 0x00000168U; +} +static inline u32 falcon_falcon_exterrstat_r(void) +{ + return 0x0000016cU; +} +static inline u32 falcon_falcon_exterrstat_valid_m(void) +{ + return 0x1U << 31U; +} +static inline u32 falcon_falcon_exterrstat_valid_v(u32 r) +{ + return (r >> 31U) & 0x1U; +} +static inline u32 falcon_falcon_exterrstat_valid_true_v(void) +{ + return 0x00000001U; +} +static inline u32 falcon_falcon_icd_cmd_r(void) +{ + return 0x00000200U; +} +static inline u32 falcon_falcon_icd_cmd_opc_s(void) +{ + return 4U; +} +static inline u32 falcon_falcon_icd_cmd_opc_f(u32 v) 
+{ + return (v & 0xfU) << 0U; +} +static inline u32 falcon_falcon_icd_cmd_opc_m(void) +{ + return 0xfU << 0U; +} +static inline u32 falcon_falcon_icd_cmd_opc_v(u32 r) +{ + return (r >> 0U) & 0xfU; +} +static inline u32 falcon_falcon_icd_cmd_opc_rreg_f(void) +{ + return 0x8U; +} +static inline u32 falcon_falcon_icd_cmd_opc_rstat_f(void) +{ + return 0xeU; +} +static inline u32 falcon_falcon_icd_cmd_idx_f(u32 v) +{ + return (v & 0x1fU) << 8U; +} +static inline u32 falcon_falcon_icd_rdata_r(void) +{ + return 0x0000020cU; +} +static inline u32 falcon_falcon_dmemc_r(u32 i) +{ + return 0x000001c0U + i*8U; +} +static inline u32 falcon_falcon_dmemc_offs_f(u32 v) +{ + return (v & 0x3fU) << 2U; +} +static inline u32 falcon_falcon_dmemc_offs_m(void) +{ + return 0x3fU << 2U; +} +static inline u32 falcon_falcon_dmemc_blk_f(u32 v) +{ + return (v & 0xffU) << 8U; +} +static inline u32 falcon_falcon_dmemc_blk_m(void) +{ + return 0xffU << 8U; +} +static inline u32 falcon_falcon_dmemc_aincw_f(u32 v) +{ + return (v & 0x1U) << 24U; +} +static inline u32 falcon_falcon_dmemc_aincr_f(u32 v) +{ + return (v & 0x1U) << 25U; +} +static inline u32 falcon_falcon_dmemd_r(u32 i) +{ + return 0x000001c4U + i*8U; +} +static inline u32 falcon_falcon_debug1_r(void) +{ + return 0x00000090U; +} +static inline u32 falcon_falcon_debug1_ctxsw_mode_s(void) +{ + return 1U; +} +static inline u32 falcon_falcon_debug1_ctxsw_mode_f(u32 v) +{ + return (v & 0x1U) << 16U; +} +static inline u32 falcon_falcon_debug1_ctxsw_mode_m(void) +{ + return 0x1U << 16U; +} +static inline u32 falcon_falcon_debug1_ctxsw_mode_v(u32 r) +{ + return (r >> 16U) & 0x1U; +} +static inline u32 falcon_falcon_debug1_ctxsw_mode_init_f(void) +{ + return 0x0U; +} +static inline u32 falcon_falcon_debuginfo_r(void) +{ + return 0x00000094U; +} +#endif diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_fb_gv100.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_fb_gv100.h new file mode 100644 index 000000000..a4fcd1e67 --- /dev/null +++ 
b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_fb_gv100.h @@ -0,0 +1,1511 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/* + * Function naming determines intended use: + * + * _r(void) : Returns the offset for register . + * + * _o(void) : Returns the offset for element . + * + * _w(void) : Returns the word offset for word (4 byte) element . + * + * __s(void) : Returns size of field of register in bits. + * + * __f(u32 v) : Returns a value based on 'v' which has been shifted + * and masked to place it at field of register . This value + * can be |'d with others to produce a full register value for + * register . + * + * __m(void) : Returns a mask for field of register . This + * value can be ~'d and then &'d to clear the value of field for + * register . + * + * ___f(void) : Returns the constant value after being shifted + * to place it at field of register . 
This value can be |'d + * with others to produce a full register value for . + * + * __v(u32 r) : Returns the value of field from a full register + * value 'r' after being shifted to place its LSB at bit 0. + * This value is suitable for direct comparison with other unshifted + * values appropriate for use in field of register . + * + * ___v(void) : Returns the constant value for defined for + * field of register . This value is suitable for direct + * comparison with unshifted values appropriate for use in field + * of register . + */ +#ifndef _hw_fb_gv100_h_ +#define _hw_fb_gv100_h_ + +static inline u32 fb_fbhub_num_active_ltcs_r(void) +{ + return 0x00100800U; +} +static inline u32 fb_mmu_ctrl_r(void) +{ + return 0x00100c80U; +} +static inline u32 fb_mmu_ctrl_vm_pg_size_f(u32 v) +{ + return (v & 0x1U) << 0U; +} +static inline u32 fb_mmu_ctrl_vm_pg_size_128kb_f(void) +{ + return 0x0U; +} +static inline u32 fb_mmu_ctrl_vm_pg_size_64kb_f(void) +{ + return 0x1U; +} +static inline u32 fb_mmu_ctrl_pri_fifo_empty_v(u32 r) +{ + return (r >> 15U) & 0x1U; +} +static inline u32 fb_mmu_ctrl_pri_fifo_empty_false_f(void) +{ + return 0x0U; +} +static inline u32 fb_mmu_ctrl_pri_fifo_space_v(u32 r) +{ + return (r >> 16U) & 0xffU; +} +static inline u32 fb_mmu_ctrl_use_pdb_big_page_size_v(u32 r) +{ + return (r >> 11U) & 0x1U; +} +static inline u32 fb_mmu_ctrl_use_pdb_big_page_size_true_f(void) +{ + return 0x800U; +} +static inline u32 fb_mmu_ctrl_use_pdb_big_page_size_false_f(void) +{ + return 0x0U; +} +static inline u32 fb_priv_mmu_phy_secure_r(void) +{ + return 0x00100ce4U; +} +static inline u32 fb_mmu_invalidate_pdb_r(void) +{ + return 0x00100cb8U; +} +static inline u32 fb_mmu_invalidate_pdb_aperture_vid_mem_f(void) +{ + return 0x0U; +} +static inline u32 fb_mmu_invalidate_pdb_aperture_sys_mem_f(void) +{ + return 0x2U; +} +static inline u32 fb_mmu_invalidate_pdb_addr_f(u32 v) +{ + return (v & 0xfffffffU) << 4U; +} +static inline u32 fb_mmu_invalidate_r(void) +{ + return 
0x00100cbcU; +} +static inline u32 fb_mmu_invalidate_all_va_true_f(void) +{ + return 0x1U; +} +static inline u32 fb_mmu_invalidate_all_pdb_true_f(void) +{ + return 0x2U; +} +static inline u32 fb_mmu_invalidate_hubtlb_only_s(void) +{ + return 1U; +} +static inline u32 fb_mmu_invalidate_hubtlb_only_f(u32 v) +{ + return (v & 0x1U) << 2U; +} +static inline u32 fb_mmu_invalidate_hubtlb_only_m(void) +{ + return 0x1U << 2U; +} +static inline u32 fb_mmu_invalidate_hubtlb_only_v(u32 r) +{ + return (r >> 2U) & 0x1U; +} +static inline u32 fb_mmu_invalidate_hubtlb_only_true_f(void) +{ + return 0x4U; +} +static inline u32 fb_mmu_invalidate_replay_s(void) +{ + return 3U; +} +static inline u32 fb_mmu_invalidate_replay_f(u32 v) +{ + return (v & 0x7U) << 3U; +} +static inline u32 fb_mmu_invalidate_replay_m(void) +{ + return 0x7U << 3U; +} +static inline u32 fb_mmu_invalidate_replay_v(u32 r) +{ + return (r >> 3U) & 0x7U; +} +static inline u32 fb_mmu_invalidate_replay_none_f(void) +{ + return 0x0U; +} +static inline u32 fb_mmu_invalidate_replay_start_f(void) +{ + return 0x8U; +} +static inline u32 fb_mmu_invalidate_replay_start_ack_all_f(void) +{ + return 0x10U; +} +static inline u32 fb_mmu_invalidate_replay_cancel_global_f(void) +{ + return 0x20U; +} +static inline u32 fb_mmu_invalidate_sys_membar_s(void) +{ + return 1U; +} +static inline u32 fb_mmu_invalidate_sys_membar_f(u32 v) +{ + return (v & 0x1U) << 6U; +} +static inline u32 fb_mmu_invalidate_sys_membar_m(void) +{ + return 0x1U << 6U; +} +static inline u32 fb_mmu_invalidate_sys_membar_v(u32 r) +{ + return (r >> 6U) & 0x1U; +} +static inline u32 fb_mmu_invalidate_sys_membar_true_f(void) +{ + return 0x40U; +} +static inline u32 fb_mmu_invalidate_ack_s(void) +{ + return 2U; +} +static inline u32 fb_mmu_invalidate_ack_f(u32 v) +{ + return (v & 0x3U) << 7U; +} +static inline u32 fb_mmu_invalidate_ack_m(void) +{ + return 0x3U << 7U; +} +static inline u32 fb_mmu_invalidate_ack_v(u32 r) +{ + return (r >> 7U) & 0x3U; +} +static inline 
u32 fb_mmu_invalidate_ack_ack_none_required_f(void) +{ + return 0x0U; +} +static inline u32 fb_mmu_invalidate_ack_ack_intranode_f(void) +{ + return 0x100U; +} +static inline u32 fb_mmu_invalidate_ack_ack_globally_f(void) +{ + return 0x80U; +} +static inline u32 fb_mmu_invalidate_cancel_client_id_s(void) +{ + return 6U; +} +static inline u32 fb_mmu_invalidate_cancel_client_id_f(u32 v) +{ + return (v & 0x3fU) << 9U; +} +static inline u32 fb_mmu_invalidate_cancel_client_id_m(void) +{ + return 0x3fU << 9U; +} +static inline u32 fb_mmu_invalidate_cancel_client_id_v(u32 r) +{ + return (r >> 9U) & 0x3fU; +} +static inline u32 fb_mmu_invalidate_cancel_gpc_id_s(void) +{ + return 5U; +} +static inline u32 fb_mmu_invalidate_cancel_gpc_id_f(u32 v) +{ + return (v & 0x1fU) << 15U; +} +static inline u32 fb_mmu_invalidate_cancel_gpc_id_m(void) +{ + return 0x1fU << 15U; +} +static inline u32 fb_mmu_invalidate_cancel_gpc_id_v(u32 r) +{ + return (r >> 15U) & 0x1fU; +} +static inline u32 fb_mmu_invalidate_cancel_client_type_s(void) +{ + return 1U; +} +static inline u32 fb_mmu_invalidate_cancel_client_type_f(u32 v) +{ + return (v & 0x1U) << 20U; +} +static inline u32 fb_mmu_invalidate_cancel_client_type_m(void) +{ + return 0x1U << 20U; +} +static inline u32 fb_mmu_invalidate_cancel_client_type_v(u32 r) +{ + return (r >> 20U) & 0x1U; +} +static inline u32 fb_mmu_invalidate_cancel_client_type_gpc_f(void) +{ + return 0x0U; +} +static inline u32 fb_mmu_invalidate_cancel_client_type_hub_f(void) +{ + return 0x100000U; +} +static inline u32 fb_mmu_invalidate_cancel_cache_level_s(void) +{ + return 3U; +} +static inline u32 fb_mmu_invalidate_cancel_cache_level_f(u32 v) +{ + return (v & 0x7U) << 24U; +} +static inline u32 fb_mmu_invalidate_cancel_cache_level_m(void) +{ + return 0x7U << 24U; +} +static inline u32 fb_mmu_invalidate_cancel_cache_level_v(u32 r) +{ + return (r >> 24U) & 0x7U; +} +static inline u32 fb_mmu_invalidate_cancel_cache_level_all_f(void) +{ + return 0x0U; +} +static inline 
u32 fb_mmu_invalidate_cancel_cache_level_pte_only_f(void) +{ + return 0x1000000U; +} +static inline u32 fb_mmu_invalidate_cancel_cache_level_up_to_pde0_f(void) +{ + return 0x2000000U; +} +static inline u32 fb_mmu_invalidate_cancel_cache_level_up_to_pde1_f(void) +{ + return 0x3000000U; +} +static inline u32 fb_mmu_invalidate_cancel_cache_level_up_to_pde2_f(void) +{ + return 0x4000000U; +} +static inline u32 fb_mmu_invalidate_cancel_cache_level_up_to_pde3_f(void) +{ + return 0x5000000U; +} +static inline u32 fb_mmu_invalidate_cancel_cache_level_up_to_pde4_f(void) +{ + return 0x6000000U; +} +static inline u32 fb_mmu_invalidate_cancel_cache_level_up_to_pde5_f(void) +{ + return 0x7000000U; +} +static inline u32 fb_mmu_invalidate_trigger_s(void) +{ + return 1U; +} +static inline u32 fb_mmu_invalidate_trigger_f(u32 v) +{ + return (v & 0x1U) << 31U; +} +static inline u32 fb_mmu_invalidate_trigger_m(void) +{ + return 0x1U << 31U; +} +static inline u32 fb_mmu_invalidate_trigger_v(u32 r) +{ + return (r >> 31U) & 0x1U; +} +static inline u32 fb_mmu_invalidate_trigger_true_f(void) +{ + return 0x80000000U; +} +static inline u32 fb_mmu_debug_wr_r(void) +{ + return 0x00100cc8U; +} +static inline u32 fb_mmu_debug_wr_aperture_s(void) +{ + return 2U; +} +static inline u32 fb_mmu_debug_wr_aperture_f(u32 v) +{ + return (v & 0x3U) << 0U; +} +static inline u32 fb_mmu_debug_wr_aperture_m(void) +{ + return 0x3U << 0U; +} +static inline u32 fb_mmu_debug_wr_aperture_v(u32 r) +{ + return (r >> 0U) & 0x3U; +} +static inline u32 fb_mmu_debug_wr_aperture_vid_mem_f(void) +{ + return 0x0U; +} +static inline u32 fb_mmu_debug_wr_aperture_sys_mem_coh_f(void) +{ + return 0x2U; +} +static inline u32 fb_mmu_debug_wr_aperture_sys_mem_ncoh_f(void) +{ + return 0x3U; +} +static inline u32 fb_mmu_debug_wr_vol_false_f(void) +{ + return 0x0U; +} +static inline u32 fb_mmu_debug_wr_vol_true_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_debug_wr_vol_true_f(void) +{ + return 0x4U; +} +static inline 
u32 fb_mmu_debug_wr_addr_f(u32 v) +{ + return (v & 0xfffffffU) << 4U; +} +static inline u32 fb_mmu_debug_wr_addr_alignment_v(void) +{ + return 0x0000000cU; +} +static inline u32 fb_mmu_debug_rd_r(void) +{ + return 0x00100cccU; +} +static inline u32 fb_mmu_debug_rd_aperture_vid_mem_f(void) +{ + return 0x0U; +} +static inline u32 fb_mmu_debug_rd_aperture_sys_mem_coh_f(void) +{ + return 0x2U; +} +static inline u32 fb_mmu_debug_rd_aperture_sys_mem_ncoh_f(void) +{ + return 0x3U; +} +static inline u32 fb_mmu_debug_rd_vol_false_f(void) +{ + return 0x0U; +} +static inline u32 fb_mmu_debug_rd_addr_f(u32 v) +{ + return (v & 0xfffffffU) << 4U; +} +static inline u32 fb_mmu_debug_rd_addr_alignment_v(void) +{ + return 0x0000000cU; +} +static inline u32 fb_mmu_debug_ctrl_r(void) +{ + return 0x00100cc4U; +} +static inline u32 fb_mmu_debug_ctrl_debug_v(u32 r) +{ + return (r >> 16U) & 0x1U; +} +static inline u32 fb_mmu_debug_ctrl_debug_m(void) +{ + return 0x1U << 16U; +} +static inline u32 fb_mmu_debug_ctrl_debug_enabled_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_debug_ctrl_debug_disabled_v(void) +{ + return 0x00000000U; +} +static inline u32 fb_mmu_vpr_info_r(void) +{ + return 0x00100cd0U; +} +static inline u32 fb_mmu_vpr_info_index_f(u32 v) +{ + return (v & 0x3U) << 0U; +} +static inline u32 fb_mmu_vpr_info_index_v(u32 r) +{ + return (r >> 0U) & 0x3U; +} +static inline u32 fb_mmu_vpr_info_index_m(void) +{ + return 0x3U << 0U; +} +static inline u32 fb_mmu_vpr_info_index_addr_lo_v(void) +{ + return 0x00000000U; +} +static inline u32 fb_mmu_vpr_info_index_addr_hi_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_vpr_info_index_cya_lo_v(void) +{ + return 0x00000002U; +} +static inline u32 fb_mmu_vpr_info_index_cya_hi_v(void) +{ + return 0x00000003U; +} +static inline u32 fb_mmu_vpr_info_cya_lo_in_use_m(void) +{ + return 0x1U << 4U; +} +static inline u32 fb_mmu_vpr_info_fetch_f(u32 v) +{ + return (v & 0x1U) << 2U; +} +static inline u32 
fb_mmu_vpr_info_fetch_v(u32 r) +{ + return (r >> 2U) & 0x1U; +} +static inline u32 fb_mmu_vpr_info_fetch_false_v(void) +{ + return 0x00000000U; +} +static inline u32 fb_mmu_vpr_info_fetch_true_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_niso_flush_sysmem_addr_r(void) +{ + return 0x00100c10U; +} +static inline u32 fb_niso_intr_r(void) +{ + return 0x00100a20U; +} +static inline u32 fb_niso_intr_hub_access_counter_notify_m(void) +{ + return 0x1U << 0U; +} +static inline u32 fb_niso_intr_hub_access_counter_notify_pending_f(void) +{ + return 0x1U; +} +static inline u32 fb_niso_intr_hub_access_counter_error_m(void) +{ + return 0x1U << 1U; +} +static inline u32 fb_niso_intr_hub_access_counter_error_pending_f(void) +{ + return 0x2U; +} +static inline u32 fb_niso_intr_mmu_replayable_fault_notify_m(void) +{ + return 0x1U << 27U; +} +static inline u32 fb_niso_intr_mmu_replayable_fault_notify_pending_f(void) +{ + return 0x8000000U; +} +static inline u32 fb_niso_intr_mmu_replayable_fault_overflow_m(void) +{ + return 0x1U << 28U; +} +static inline u32 fb_niso_intr_mmu_replayable_fault_overflow_pending_f(void) +{ + return 0x10000000U; +} +static inline u32 fb_niso_intr_mmu_nonreplayable_fault_notify_m(void) +{ + return 0x1U << 29U; +} +static inline u32 fb_niso_intr_mmu_nonreplayable_fault_notify_pending_f(void) +{ + return 0x20000000U; +} +static inline u32 fb_niso_intr_mmu_nonreplayable_fault_overflow_m(void) +{ + return 0x1U << 30U; +} +static inline u32 fb_niso_intr_mmu_nonreplayable_fault_overflow_pending_f(void) +{ + return 0x40000000U; +} +static inline u32 fb_niso_intr_mmu_other_fault_notify_m(void) +{ + return 0x1U << 31U; +} +static inline u32 fb_niso_intr_mmu_other_fault_notify_pending_f(void) +{ + return 0x80000000U; +} +static inline u32 fb_niso_intr_en_r(u32 i) +{ + return 0x00100a24U + i*4U; +} +static inline u32 fb_niso_intr_en__size_1_v(void) +{ + return 0x00000002U; +} +static inline u32 fb_niso_intr_en_hub_access_counter_notify_f(u32 v) +{ + 
return (v & 0x1U) << 0U; +} +static inline u32 fb_niso_intr_en_hub_access_counter_notify_enabled_f(void) +{ + return 0x1U; +} +static inline u32 fb_niso_intr_en_hub_access_counter_error_f(u32 v) +{ + return (v & 0x1U) << 1U; +} +static inline u32 fb_niso_intr_en_hub_access_counter_error_enabled_f(void) +{ + return 0x2U; +} +static inline u32 fb_niso_intr_en_mmu_replayable_fault_notify_f(u32 v) +{ + return (v & 0x1U) << 27U; +} +static inline u32 fb_niso_intr_en_mmu_replayable_fault_notify_enabled_f(void) +{ + return 0x8000000U; +} +static inline u32 fb_niso_intr_en_mmu_replayable_fault_overflow_f(u32 v) +{ + return (v & 0x1U) << 28U; +} +static inline u32 fb_niso_intr_en_mmu_replayable_fault_overflow_enabled_f(void) +{ + return 0x10000000U; +} +static inline u32 fb_niso_intr_en_mmu_nonreplayable_fault_notify_f(u32 v) +{ + return (v & 0x1U) << 29U; +} +static inline u32 fb_niso_intr_en_mmu_nonreplayable_fault_notify_enabled_f(void) +{ + return 0x20000000U; +} +static inline u32 fb_niso_intr_en_mmu_nonreplayable_fault_overflow_f(u32 v) +{ + return (v & 0x1U) << 30U; +} +static inline u32 fb_niso_intr_en_mmu_nonreplayable_fault_overflow_enabled_f(void) +{ + return 0x40000000U; +} +static inline u32 fb_niso_intr_en_mmu_other_fault_notify_f(u32 v) +{ + return (v & 0x1U) << 31U; +} +static inline u32 fb_niso_intr_en_mmu_other_fault_notify_enabled_f(void) +{ + return 0x80000000U; +} +static inline u32 fb_niso_intr_en_set_r(u32 i) +{ + return 0x00100a2cU + i*4U; +} +static inline u32 fb_niso_intr_en_set__size_1_v(void) +{ + return 0x00000002U; +} +static inline u32 fb_niso_intr_en_set_hub_access_counter_notify_m(void) +{ + return 0x1U << 0U; +} +static inline u32 fb_niso_intr_en_set_hub_access_counter_notify_set_f(void) +{ + return 0x1U; +} +static inline u32 fb_niso_intr_en_set_hub_access_counter_error_m(void) +{ + return 0x1U << 1U; +} +static inline u32 fb_niso_intr_en_set_hub_access_counter_error_set_f(void) +{ + return 0x2U; +} +static inline u32 
fb_niso_intr_en_set_mmu_replayable_fault_notify_m(void) +{ + return 0x1U << 27U; +} +static inline u32 fb_niso_intr_en_set_mmu_replayable_fault_notify_set_f(void) +{ + return 0x8000000U; +} +static inline u32 fb_niso_intr_en_set_mmu_replayable_fault_overflow_m(void) +{ + return 0x1U << 28U; +} +static inline u32 fb_niso_intr_en_set_mmu_replayable_fault_overflow_set_f(void) +{ + return 0x10000000U; +} +static inline u32 fb_niso_intr_en_set_mmu_nonreplayable_fault_notify_m(void) +{ + return 0x1U << 29U; +} +static inline u32 fb_niso_intr_en_set_mmu_nonreplayable_fault_notify_set_f(void) +{ + return 0x20000000U; +} +static inline u32 fb_niso_intr_en_set_mmu_nonreplayable_fault_overflow_m(void) +{ + return 0x1U << 30U; +} +static inline u32 fb_niso_intr_en_set_mmu_nonreplayable_fault_overflow_set_f(void) +{ + return 0x40000000U; +} +static inline u32 fb_niso_intr_en_set_mmu_other_fault_notify_m(void) +{ + return 0x1U << 31U; +} +static inline u32 fb_niso_intr_en_set_mmu_other_fault_notify_set_f(void) +{ + return 0x80000000U; +} +static inline u32 fb_niso_intr_en_clr_r(u32 i) +{ + return 0x00100a34U + i*4U; +} +static inline u32 fb_niso_intr_en_clr__size_1_v(void) +{ + return 0x00000002U; +} +static inline u32 fb_niso_intr_en_clr_hub_access_counter_notify_m(void) +{ + return 0x1U << 0U; +} +static inline u32 fb_niso_intr_en_clr_hub_access_counter_notify_set_f(void) +{ + return 0x1U; +} +static inline u32 fb_niso_intr_en_clr_hub_access_counter_error_m(void) +{ + return 0x1U << 1U; +} +static inline u32 fb_niso_intr_en_clr_hub_access_counter_error_set_f(void) +{ + return 0x2U; +} +static inline u32 fb_niso_intr_en_clr_mmu_replayable_fault_notify_m(void) +{ + return 0x1U << 27U; +} +static inline u32 fb_niso_intr_en_clr_mmu_replayable_fault_notify_set_f(void) +{ + return 0x8000000U; +} +static inline u32 fb_niso_intr_en_clr_mmu_replayable_fault_overflow_m(void) +{ + return 0x1U << 28U; +} +static inline u32 fb_niso_intr_en_clr_mmu_replayable_fault_overflow_set_f(void) +{ + 
return 0x10000000U; +} +static inline u32 fb_niso_intr_en_clr_mmu_nonreplayable_fault_notify_m(void) +{ + return 0x1U << 29U; +} +static inline u32 fb_niso_intr_en_clr_mmu_nonreplayable_fault_notify_set_f(void) +{ + return 0x20000000U; +} +static inline u32 fb_niso_intr_en_clr_mmu_nonreplayable_fault_overflow_m(void) +{ + return 0x1U << 30U; +} +static inline u32 fb_niso_intr_en_clr_mmu_nonreplayable_fault_overflow_set_f(void) +{ + return 0x40000000U; +} +static inline u32 fb_niso_intr_en_clr_mmu_other_fault_notify_m(void) +{ + return 0x1U << 31U; +} +static inline u32 fb_niso_intr_en_clr_mmu_other_fault_notify_set_f(void) +{ + return 0x80000000U; +} +static inline u32 fb_niso_intr_en_clr_mmu_non_replay_fault_buffer_v(void) +{ + return 0x00000000U; +} +static inline u32 fb_niso_intr_en_clr_mmu_replay_fault_buffer_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_buffer_lo_r(u32 i) +{ + return 0x00100e24U + i*20U; +} +static inline u32 fb_mmu_fault_buffer_lo__size_1_v(void) +{ + return 0x00000002U; +} +static inline u32 fb_mmu_fault_buffer_lo_addr_mode_f(u32 v) +{ + return (v & 0x1U) << 0U; +} +static inline u32 fb_mmu_fault_buffer_lo_addr_mode_v(u32 r) +{ + return (r >> 0U) & 0x1U; +} +static inline u32 fb_mmu_fault_buffer_lo_addr_mode_virtual_v(void) +{ + return 0x00000000U; +} +static inline u32 fb_mmu_fault_buffer_lo_addr_mode_virtual_f(void) +{ + return 0x0U; +} +static inline u32 fb_mmu_fault_buffer_lo_addr_mode_physical_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_buffer_lo_addr_mode_physical_f(void) +{ + return 0x1U; +} +static inline u32 fb_mmu_fault_buffer_lo_phys_aperture_f(u32 v) +{ + return (v & 0x3U) << 1U; +} +static inline u32 fb_mmu_fault_buffer_lo_phys_aperture_v(u32 r) +{ + return (r >> 1U) & 0x3U; +} +static inline u32 fb_mmu_fault_buffer_lo_phys_aperture_sys_coh_v(void) +{ + return 0x00000002U; +} +static inline u32 fb_mmu_fault_buffer_lo_phys_aperture_sys_coh_f(void) +{ + return 0x4U; +} +static inline 
u32 fb_mmu_fault_buffer_lo_phys_aperture_sys_nocoh_v(void) +{ + return 0x00000003U; +} +static inline u32 fb_mmu_fault_buffer_lo_phys_aperture_sys_nocoh_f(void) +{ + return 0x6U; +} +static inline u32 fb_mmu_fault_buffer_lo_phys_vol_f(u32 v) +{ + return (v & 0x1U) << 3U; +} +static inline u32 fb_mmu_fault_buffer_lo_phys_vol_v(u32 r) +{ + return (r >> 3U) & 0x1U; +} +static inline u32 fb_mmu_fault_buffer_lo_addr_f(u32 v) +{ + return (v & 0xfffffU) << 12U; +} +static inline u32 fb_mmu_fault_buffer_lo_addr_v(u32 r) +{ + return (r >> 12U) & 0xfffffU; +} +static inline u32 fb_mmu_fault_buffer_hi_r(u32 i) +{ + return 0x00100e28U + i*20U; +} +static inline u32 fb_mmu_fault_buffer_hi__size_1_v(void) +{ + return 0x00000002U; +} +static inline u32 fb_mmu_fault_buffer_hi_addr_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 fb_mmu_fault_buffer_hi_addr_v(u32 r) +{ + return (r >> 0U) & 0xffffffffU; +} +static inline u32 fb_mmu_fault_buffer_get_r(u32 i) +{ + return 0x00100e2cU + i*20U; +} +static inline u32 fb_mmu_fault_buffer_get__size_1_v(void) +{ + return 0x00000002U; +} +static inline u32 fb_mmu_fault_buffer_get_ptr_f(u32 v) +{ + return (v & 0xfffffU) << 0U; +} +static inline u32 fb_mmu_fault_buffer_get_ptr_m(void) +{ + return 0xfffffU << 0U; +} +static inline u32 fb_mmu_fault_buffer_get_ptr_v(u32 r) +{ + return (r >> 0U) & 0xfffffU; +} +static inline u32 fb_mmu_fault_buffer_get_getptr_corrupted_f(u32 v) +{ + return (v & 0x1U) << 30U; +} +static inline u32 fb_mmu_fault_buffer_get_getptr_corrupted_m(void) +{ + return 0x1U << 30U; +} +static inline u32 fb_mmu_fault_buffer_get_getptr_corrupted_clear_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_buffer_get_getptr_corrupted_clear_f(void) +{ + return 0x40000000U; +} +static inline u32 fb_mmu_fault_buffer_get_overflow_f(u32 v) +{ + return (v & 0x1U) << 31U; +} +static inline u32 fb_mmu_fault_buffer_get_overflow_m(void) +{ + return 0x1U << 31U; +} +static inline u32 
fb_mmu_fault_buffer_get_overflow_clear_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_buffer_get_overflow_clear_f(void) +{ + return 0x80000000U; +} +static inline u32 fb_mmu_fault_buffer_put_r(u32 i) +{ + return 0x00100e30U + i*20U; +} +static inline u32 fb_mmu_fault_buffer_put__size_1_v(void) +{ + return 0x00000002U; +} +static inline u32 fb_mmu_fault_buffer_put_ptr_f(u32 v) +{ + return (v & 0xfffffU) << 0U; +} +static inline u32 fb_mmu_fault_buffer_put_ptr_v(u32 r) +{ + return (r >> 0U) & 0xfffffU; +} +static inline u32 fb_mmu_fault_buffer_put_getptr_corrupted_f(u32 v) +{ + return (v & 0x1U) << 30U; +} +static inline u32 fb_mmu_fault_buffer_put_getptr_corrupted_v(u32 r) +{ + return (r >> 30U) & 0x1U; +} +static inline u32 fb_mmu_fault_buffer_put_getptr_corrupted_yes_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_buffer_put_getptr_corrupted_yes_f(void) +{ + return 0x40000000U; +} +static inline u32 fb_mmu_fault_buffer_put_getptr_corrupted_no_v(void) +{ + return 0x00000000U; +} +static inline u32 fb_mmu_fault_buffer_put_getptr_corrupted_no_f(void) +{ + return 0x0U; +} +static inline u32 fb_mmu_fault_buffer_put_overflow_f(u32 v) +{ + return (v & 0x1U) << 31U; +} +static inline u32 fb_mmu_fault_buffer_put_overflow_v(u32 r) +{ + return (r >> 31U) & 0x1U; +} +static inline u32 fb_mmu_fault_buffer_put_overflow_yes_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_buffer_put_overflow_yes_f(void) +{ + return 0x80000000U; +} +static inline u32 fb_mmu_fault_buffer_size_r(u32 i) +{ + return 0x00100e34U + i*20U; +} +static inline u32 fb_mmu_fault_buffer_size__size_1_v(void) +{ + return 0x00000002U; +} +static inline u32 fb_mmu_fault_buffer_size_val_f(u32 v) +{ + return (v & 0xfffffU) << 0U; +} +static inline u32 fb_mmu_fault_buffer_size_val_v(u32 r) +{ + return (r >> 0U) & 0xfffffU; +} +static inline u32 fb_mmu_fault_buffer_size_overflow_intr_f(u32 v) +{ + return (v & 0x1U) << 29U; +} +static inline u32 
fb_mmu_fault_buffer_size_overflow_intr_v(u32 r) +{ + return (r >> 29U) & 0x1U; +} +static inline u32 fb_mmu_fault_buffer_size_overflow_intr_enable_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_buffer_size_overflow_intr_enable_f(void) +{ + return 0x20000000U; +} +static inline u32 fb_mmu_fault_buffer_size_set_default_f(u32 v) +{ + return (v & 0x1U) << 30U; +} +static inline u32 fb_mmu_fault_buffer_size_set_default_v(u32 r) +{ + return (r >> 30U) & 0x1U; +} +static inline u32 fb_mmu_fault_buffer_size_set_default_yes_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_buffer_size_set_default_yes_f(void) +{ + return 0x40000000U; +} +static inline u32 fb_mmu_fault_buffer_size_enable_f(u32 v) +{ + return (v & 0x1U) << 31U; +} +static inline u32 fb_mmu_fault_buffer_size_enable_m(void) +{ + return 0x1U << 31U; +} +static inline u32 fb_mmu_fault_buffer_size_enable_v(u32 r) +{ + return (r >> 31U) & 0x1U; +} +static inline u32 fb_mmu_fault_buffer_size_enable_true_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_buffer_size_enable_true_f(void) +{ + return 0x80000000U; +} +static inline u32 fb_mmu_fault_addr_lo_r(void) +{ + return 0x00100e4cU; +} +static inline u32 fb_mmu_fault_addr_lo_phys_aperture_f(u32 v) +{ + return (v & 0x3U) << 0U; +} +static inline u32 fb_mmu_fault_addr_lo_phys_aperture_v(u32 r) +{ + return (r >> 0U) & 0x3U; +} +static inline u32 fb_mmu_fault_addr_lo_phys_aperture_sys_coh_v(void) +{ + return 0x00000002U; +} +static inline u32 fb_mmu_fault_addr_lo_phys_aperture_sys_coh_f(void) +{ + return 0x2U; +} +static inline u32 fb_mmu_fault_addr_lo_phys_aperture_sys_nocoh_v(void) +{ + return 0x00000003U; +} +static inline u32 fb_mmu_fault_addr_lo_phys_aperture_sys_nocoh_f(void) +{ + return 0x3U; +} +static inline u32 fb_mmu_fault_addr_lo_addr_f(u32 v) +{ + return (v & 0xfffffU) << 12U; +} +static inline u32 fb_mmu_fault_addr_lo_addr_v(u32 r) +{ + return (r >> 12U) & 0xfffffU; +} +static inline u32 
fb_mmu_fault_addr_hi_r(void) +{ + return 0x00100e50U; +} +static inline u32 fb_mmu_fault_addr_hi_addr_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 fb_mmu_fault_addr_hi_addr_v(u32 r) +{ + return (r >> 0U) & 0xffffffffU; +} +static inline u32 fb_mmu_fault_inst_lo_r(void) +{ + return 0x00100e54U; +} +static inline u32 fb_mmu_fault_inst_lo_engine_id_v(u32 r) +{ + return (r >> 0U) & 0x1ffU; +} +static inline u32 fb_mmu_fault_inst_lo_aperture_v(u32 r) +{ + return (r >> 10U) & 0x3U; +} +static inline u32 fb_mmu_fault_inst_lo_aperture_sys_coh_v(void) +{ + return 0x00000002U; +} +static inline u32 fb_mmu_fault_inst_lo_aperture_sys_nocoh_v(void) +{ + return 0x00000003U; +} +static inline u32 fb_mmu_fault_inst_lo_addr_f(u32 v) +{ + return (v & 0xfffffU) << 12U; +} +static inline u32 fb_mmu_fault_inst_lo_addr_v(u32 r) +{ + return (r >> 12U) & 0xfffffU; +} +static inline u32 fb_mmu_fault_inst_hi_r(void) +{ + return 0x00100e58U; +} +static inline u32 fb_mmu_fault_inst_hi_addr_v(u32 r) +{ + return (r >> 0U) & 0xffffffffU; +} +static inline u32 fb_mmu_fault_info_r(void) +{ + return 0x00100e5cU; +} +static inline u32 fb_mmu_fault_info_fault_type_v(u32 r) +{ + return (r >> 0U) & 0x1fU; +} +static inline u32 fb_mmu_fault_info_replayable_fault_v(u32 r) +{ + return (r >> 7U) & 0x1U; +} +static inline u32 fb_mmu_fault_info_client_v(u32 r) +{ + return (r >> 8U) & 0x7fU; +} +static inline u32 fb_mmu_fault_info_access_type_v(u32 r) +{ + return (r >> 16U) & 0xfU; +} +static inline u32 fb_mmu_fault_info_client_type_v(u32 r) +{ + return (r >> 20U) & 0x1U; +} +static inline u32 fb_mmu_fault_info_gpc_id_v(u32 r) +{ + return (r >> 24U) & 0x1fU; +} +static inline u32 fb_mmu_fault_info_protected_mode_v(u32 r) +{ + return (r >> 29U) & 0x1U; +} +static inline u32 fb_mmu_fault_info_replayable_fault_en_v(u32 r) +{ + return (r >> 30U) & 0x1U; +} +static inline u32 fb_mmu_fault_info_valid_v(u32 r) +{ + return (r >> 31U) & 0x1U; +} +static inline u32 fb_mmu_fault_status_r(void) +{ 
+ return 0x00100e60U; +} +static inline u32 fb_mmu_fault_status_dropped_bar1_phys_m(void) +{ + return 0x1U << 0U; +} +static inline u32 fb_mmu_fault_status_dropped_bar1_phys_set_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_status_dropped_bar1_phys_set_f(void) +{ + return 0x1U; +} +static inline u32 fb_mmu_fault_status_dropped_bar1_phys_clear_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_status_dropped_bar1_phys_clear_f(void) +{ + return 0x1U; +} +static inline u32 fb_mmu_fault_status_dropped_bar1_virt_m(void) +{ + return 0x1U << 1U; +} +static inline u32 fb_mmu_fault_status_dropped_bar1_virt_set_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_status_dropped_bar1_virt_set_f(void) +{ + return 0x2U; +} +static inline u32 fb_mmu_fault_status_dropped_bar1_virt_clear_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_status_dropped_bar1_virt_clear_f(void) +{ + return 0x2U; +} +static inline u32 fb_mmu_fault_status_dropped_bar2_phys_m(void) +{ + return 0x1U << 2U; +} +static inline u32 fb_mmu_fault_status_dropped_bar2_phys_set_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_status_dropped_bar2_phys_set_f(void) +{ + return 0x4U; +} +static inline u32 fb_mmu_fault_status_dropped_bar2_phys_clear_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_status_dropped_bar2_phys_clear_f(void) +{ + return 0x4U; +} +static inline u32 fb_mmu_fault_status_dropped_bar2_virt_m(void) +{ + return 0x1U << 3U; +} +static inline u32 fb_mmu_fault_status_dropped_bar2_virt_set_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_status_dropped_bar2_virt_set_f(void) +{ + return 0x8U; +} +static inline u32 fb_mmu_fault_status_dropped_bar2_virt_clear_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_status_dropped_bar2_virt_clear_f(void) +{ + return 0x8U; +} +static inline u32 fb_mmu_fault_status_dropped_ifb_phys_m(void) +{ + return 0x1U << 4U; +} 
+static inline u32 fb_mmu_fault_status_dropped_ifb_phys_set_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_status_dropped_ifb_phys_set_f(void) +{ + return 0x10U; +} +static inline u32 fb_mmu_fault_status_dropped_ifb_phys_clear_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_status_dropped_ifb_phys_clear_f(void) +{ + return 0x10U; +} +static inline u32 fb_mmu_fault_status_dropped_ifb_virt_m(void) +{ + return 0x1U << 5U; +} +static inline u32 fb_mmu_fault_status_dropped_ifb_virt_set_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_status_dropped_ifb_virt_set_f(void) +{ + return 0x20U; +} +static inline u32 fb_mmu_fault_status_dropped_ifb_virt_clear_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_status_dropped_ifb_virt_clear_f(void) +{ + return 0x20U; +} +static inline u32 fb_mmu_fault_status_dropped_other_phys_m(void) +{ + return 0x1U << 6U; +} +static inline u32 fb_mmu_fault_status_dropped_other_phys_set_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_status_dropped_other_phys_set_f(void) +{ + return 0x40U; +} +static inline u32 fb_mmu_fault_status_dropped_other_phys_clear_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_status_dropped_other_phys_clear_f(void) +{ + return 0x40U; +} +static inline u32 fb_mmu_fault_status_dropped_other_virt_m(void) +{ + return 0x1U << 7U; +} +static inline u32 fb_mmu_fault_status_dropped_other_virt_set_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_status_dropped_other_virt_set_f(void) +{ + return 0x80U; +} +static inline u32 fb_mmu_fault_status_dropped_other_virt_clear_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_status_dropped_other_virt_clear_f(void) +{ + return 0x80U; +} +static inline u32 fb_mmu_fault_status_replayable_m(void) +{ + return 0x1U << 8U; +} +static inline u32 fb_mmu_fault_status_replayable_set_v(void) +{ + return 0x00000001U; +} +static inline u32 
fb_mmu_fault_status_replayable_set_f(void) +{ + return 0x100U; +} +static inline u32 fb_mmu_fault_status_replayable_reset_f(void) +{ + return 0x0U; +} +static inline u32 fb_mmu_fault_status_non_replayable_m(void) +{ + return 0x1U << 9U; +} +static inline u32 fb_mmu_fault_status_non_replayable_set_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_status_non_replayable_set_f(void) +{ + return 0x200U; +} +static inline u32 fb_mmu_fault_status_non_replayable_reset_f(void) +{ + return 0x0U; +} +static inline u32 fb_mmu_fault_status_replayable_error_m(void) +{ + return 0x1U << 10U; +} +static inline u32 fb_mmu_fault_status_replayable_error_set_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_status_replayable_error_set_f(void) +{ + return 0x400U; +} +static inline u32 fb_mmu_fault_status_replayable_error_reset_f(void) +{ + return 0x0U; +} +static inline u32 fb_mmu_fault_status_non_replayable_error_m(void) +{ + return 0x1U << 11U; +} +static inline u32 fb_mmu_fault_status_non_replayable_error_set_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_status_non_replayable_error_set_f(void) +{ + return 0x800U; +} +static inline u32 fb_mmu_fault_status_non_replayable_error_reset_f(void) +{ + return 0x0U; +} +static inline u32 fb_mmu_fault_status_replayable_overflow_m(void) +{ + return 0x1U << 12U; +} +static inline u32 fb_mmu_fault_status_replayable_overflow_set_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_status_replayable_overflow_set_f(void) +{ + return 0x1000U; +} +static inline u32 fb_mmu_fault_status_replayable_overflow_reset_f(void) +{ + return 0x0U; +} +static inline u32 fb_mmu_fault_status_non_replayable_overflow_m(void) +{ + return 0x1U << 13U; +} +static inline u32 fb_mmu_fault_status_non_replayable_overflow_set_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_status_non_replayable_overflow_set_f(void) +{ + return 0x2000U; +} +static inline u32 
fb_mmu_fault_status_non_replayable_overflow_reset_f(void) +{ + return 0x0U; +} +static inline u32 fb_mmu_fault_status_replayable_getptr_corrupted_m(void) +{ + return 0x1U << 14U; +} +static inline u32 fb_mmu_fault_status_replayable_getptr_corrupted_set_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_status_replayable_getptr_corrupted_set_f(void) +{ + return 0x4000U; +} +static inline u32 fb_mmu_fault_status_non_replayable_getptr_corrupted_m(void) +{ + return 0x1U << 15U; +} +static inline u32 fb_mmu_fault_status_non_replayable_getptr_corrupted_set_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_status_non_replayable_getptr_corrupted_set_f(void) +{ + return 0x8000U; +} +static inline u32 fb_mmu_fault_status_busy_m(void) +{ + return 0x1U << 30U; +} +static inline u32 fb_mmu_fault_status_busy_true_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_status_busy_true_f(void) +{ + return 0x40000000U; +} +static inline u32 fb_mmu_fault_status_valid_m(void) +{ + return 0x1U << 31U; +} +static inline u32 fb_mmu_fault_status_valid_set_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_status_valid_set_f(void) +{ + return 0x80000000U; +} +static inline u32 fb_mmu_fault_status_valid_clear_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_status_valid_clear_f(void) +{ + return 0x80000000U; +} +static inline u32 fb_mmu_local_memory_range_r(void) +{ + return 0x00100ce0U; +} +static inline u32 fb_mmu_local_memory_range_lower_scale_v(u32 r) +{ + return (r >> 0U) & 0xfU; +} +static inline u32 fb_mmu_local_memory_range_lower_mag_v(u32 r) +{ + return (r >> 4U) & 0x3fU; +} +static inline u32 fb_mmu_local_memory_range_ecc_mode_v(u32 r) +{ + return (r >> 30U) & 0x1U; +} +static inline u32 fb_niso_scrub_status_r(void) +{ + return 0x00100b20U; +} +static inline u32 fb_niso_scrub_status_flag_v(u32 r) +{ + return (r >> 0U) & 0x1U; +} +static inline u32 fb_mmu_priv_level_mask_r(void) +{ + return 
0x00100cdcU; +} +static inline u32 fb_mmu_priv_level_mask_write_violation_m(void) +{ + return 0x1U << 7U; +} +#endif diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_fifo_gv100.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_fifo_gv100.h new file mode 100644 index 000000000..743afb1e5 --- /dev/null +++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_fifo_gv100.h @@ -0,0 +1,551 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/* + * Function naming determines intended use: + * + * _r(void) : Returns the offset for register . + * + * _o(void) : Returns the offset for element . + * + * _w(void) : Returns the word offset for word (4 byte) element . + * + * __s(void) : Returns size of field of register in bits. + * + * __f(u32 v) : Returns a value based on 'v' which has been shifted + * and masked to place it at field of register . 
This value + * can be |'d with others to produce a full register value for + * register . + * + * __m(void) : Returns a mask for field of register . This + * value can be ~'d and then &'d to clear the value of field for + * register . + * + * ___f(void) : Returns the constant value after being shifted + * to place it at field of register . This value can be |'d + * with others to produce a full register value for . + * + * __v(u32 r) : Returns the value of field from a full register + * value 'r' after being shifted to place its LSB at bit 0. + * This value is suitable for direct comparison with other unshifted + * values appropriate for use in field of register . + * + * ___v(void) : Returns the constant value for defined for + * field of register . This value is suitable for direct + * comparison with unshifted values appropriate for use in field + * of register . + */ +#ifndef _hw_fifo_gv100_h_ +#define _hw_fifo_gv100_h_ + +static inline u32 fifo_bar1_base_r(void) +{ + return 0x00002254U; +} +static inline u32 fifo_bar1_base_ptr_f(u32 v) +{ + return (v & 0xfffffffU) << 0U; +} +static inline u32 fifo_bar1_base_ptr_align_shift_v(void) +{ + return 0x0000000cU; +} +static inline u32 fifo_bar1_base_valid_false_f(void) +{ + return 0x0U; +} +static inline u32 fifo_bar1_base_valid_true_f(void) +{ + return 0x10000000U; +} +static inline u32 fifo_userd_writeback_r(void) +{ + return 0x0000225cU; +} +static inline u32 fifo_userd_writeback_timer_f(u32 v) +{ + return (v & 0xffU) << 0U; +} +static inline u32 fifo_userd_writeback_timer_disabled_v(void) +{ + return 0x00000000U; +} +static inline u32 fifo_userd_writeback_timer_shorter_v(void) +{ + return 0x00000003U; +} +static inline u32 fifo_userd_writeback_timer_100us_v(void) +{ + return 0x00000064U; +} +static inline u32 fifo_userd_writeback_timescale_f(u32 v) +{ + return (v & 0xfU) << 12U; +} +static inline u32 fifo_userd_writeback_timescale_0_v(void) +{ + return 0x00000000U; +} +static inline u32 fifo_runlist_base_r(void) 
+{ + return 0x00002270U; +} +static inline u32 fifo_runlist_base_ptr_f(u32 v) +{ + return (v & 0xfffffffU) << 0U; +} +static inline u32 fifo_runlist_base_target_vid_mem_f(void) +{ + return 0x0U; +} +static inline u32 fifo_runlist_base_target_sys_mem_coh_f(void) +{ + return 0x20000000U; +} +static inline u32 fifo_runlist_base_target_sys_mem_ncoh_f(void) +{ + return 0x30000000U; +} +static inline u32 fifo_runlist_r(void) +{ + return 0x00002274U; +} +static inline u32 fifo_runlist_engine_f(u32 v) +{ + return (v & 0xfU) << 20U; +} +static inline u32 fifo_eng_runlist_base_r(u32 i) +{ + return 0x00002280U + i*8U; +} +static inline u32 fifo_eng_runlist_base__size_1_v(void) +{ + return 0x0000000dU; +} +static inline u32 fifo_eng_runlist_r(u32 i) +{ + return 0x00002284U + i*8U; +} +static inline u32 fifo_eng_runlist__size_1_v(void) +{ + return 0x0000000dU; +} +static inline u32 fifo_eng_runlist_length_f(u32 v) +{ + return (v & 0xffffU) << 0U; +} +static inline u32 fifo_eng_runlist_length_max_v(void) +{ + return 0x0000ffffU; +} +static inline u32 fifo_eng_runlist_pending_true_f(void) +{ + return 0x100000U; +} +static inline u32 fifo_pb_timeslice_r(u32 i) +{ + return 0x00002350U + i*4U; +} +static inline u32 fifo_pb_timeslice_timeout_16_f(void) +{ + return 0x10U; +} +static inline u32 fifo_pb_timeslice_timescale_0_f(void) +{ + return 0x0U; +} +static inline u32 fifo_pb_timeslice_enable_true_f(void) +{ + return 0x10000000U; +} +static inline u32 fifo_pbdma_map_r(u32 i) +{ + return 0x00002390U + i*4U; +} +static inline u32 fifo_intr_0_r(void) +{ + return 0x00002100U; +} +static inline u32 fifo_intr_0_bind_error_pending_f(void) +{ + return 0x1U; +} +static inline u32 fifo_intr_0_bind_error_reset_f(void) +{ + return 0x1U; +} +static inline u32 fifo_intr_0_sched_error_pending_f(void) +{ + return 0x100U; +} +static inline u32 fifo_intr_0_sched_error_reset_f(void) +{ + return 0x100U; +} +static inline u32 fifo_intr_0_chsw_error_pending_f(void) +{ + return 0x10000U; +} +static inline 
u32 fifo_intr_0_chsw_error_reset_f(void) +{ + return 0x10000U; +} +static inline u32 fifo_intr_0_fb_flush_timeout_pending_f(void) +{ + return 0x800000U; +} +static inline u32 fifo_intr_0_fb_flush_timeout_reset_f(void) +{ + return 0x800000U; +} +static inline u32 fifo_intr_0_lb_error_pending_f(void) +{ + return 0x1000000U; +} +static inline u32 fifo_intr_0_lb_error_reset_f(void) +{ + return 0x1000000U; +} +static inline u32 fifo_intr_0_pbdma_intr_pending_f(void) +{ + return 0x20000000U; +} +static inline u32 fifo_intr_0_runlist_event_pending_f(void) +{ + return 0x40000000U; +} +static inline u32 fifo_intr_0_channel_intr_pending_f(void) +{ + return 0x80000000U; +} +static inline u32 fifo_intr_en_0_r(void) +{ + return 0x00002140U; +} +static inline u32 fifo_intr_en_0_sched_error_f(u32 v) +{ + return (v & 0x1U) << 8U; +} +static inline u32 fifo_intr_en_0_sched_error_m(void) +{ + return 0x1U << 8U; +} +static inline u32 fifo_intr_en_1_r(void) +{ + return 0x00002528U; +} +static inline u32 fifo_intr_bind_error_r(void) +{ + return 0x0000252cU; +} +static inline u32 fifo_intr_sched_error_r(void) +{ + return 0x0000254cU; +} +static inline u32 fifo_intr_sched_error_code_f(u32 v) +{ + return (v & 0xffU) << 0U; +} +static inline u32 fifo_intr_chsw_error_r(void) +{ + return 0x0000256cU; +} +static inline u32 fifo_intr_pbdma_id_r(void) +{ + return 0x000025a0U; +} +static inline u32 fifo_intr_pbdma_id_status_f(u32 v, u32 i) +{ + return (v & 0x1U) << (0U + i*1U); +} +static inline u32 fifo_intr_pbdma_id_status_v(u32 r, u32 i) +{ + return (r >> (0U + i*1U)) & 0x1U; +} +static inline u32 fifo_intr_pbdma_id_status__size_1_v(void) +{ + return 0x0000000eU; +} +static inline u32 fifo_intr_runlist_r(void) +{ + return 0x00002a00U; +} +static inline u32 fifo_fb_timeout_r(void) +{ + return 0x00002a04U; +} +static inline u32 fifo_fb_timeout_period_m(void) +{ + return 0x3fffffffU << 0U; +} +static inline u32 fifo_fb_timeout_period_max_f(void) +{ + return 0x3fffffffU; +} +static inline u32 
fifo_fb_timeout_period_init_f(void) +{ + return 0x3c00U; +} +static inline u32 fifo_sched_disable_r(void) +{ + return 0x00002630U; +} +static inline u32 fifo_sched_disable_runlist_f(u32 v, u32 i) +{ + return (v & 0x1U) << (0U + i*1U); +} +static inline u32 fifo_sched_disable_runlist_m(u32 i) +{ + return 0x1U << (0U + i*1U); +} +static inline u32 fifo_sched_disable_true_v(void) +{ + return 0x00000001U; +} +static inline u32 fifo_runlist_preempt_r(void) +{ + return 0x00002638U; +} +static inline u32 fifo_runlist_preempt_runlist_f(u32 v, u32 i) +{ + return (v & 0x1U) << (0U + i*1U); +} +static inline u32 fifo_runlist_preempt_runlist_m(u32 i) +{ + return 0x1U << (0U + i*1U); +} +static inline u32 fifo_runlist_preempt_runlist_pending_v(void) +{ + return 0x00000001U; +} +static inline u32 fifo_preempt_r(void) +{ + return 0x00002634U; +} +static inline u32 fifo_preempt_pending_true_f(void) +{ + return 0x100000U; +} +static inline u32 fifo_preempt_type_channel_f(void) +{ + return 0x0U; +} +static inline u32 fifo_preempt_type_tsg_f(void) +{ + return 0x1000000U; +} +static inline u32 fifo_preempt_chid_f(u32 v) +{ + return (v & 0xfffU) << 0U; +} +static inline u32 fifo_preempt_id_f(u32 v) +{ + return (v & 0xfffU) << 0U; +} +static inline u32 fifo_engine_status_r(u32 i) +{ + return 0x00002640U + i*8U; +} +static inline u32 fifo_engine_status__size_1_v(void) +{ + return 0x0000000fU; +} +static inline u32 fifo_engine_status_id_v(u32 r) +{ + return (r >> 0U) & 0xfffU; +} +static inline u32 fifo_engine_status_id_type_v(u32 r) +{ + return (r >> 12U) & 0x1U; +} +static inline u32 fifo_engine_status_id_type_chid_v(void) +{ + return 0x00000000U; +} +static inline u32 fifo_engine_status_id_type_tsgid_v(void) +{ + return 0x00000001U; +} +static inline u32 fifo_engine_status_ctx_status_v(u32 r) +{ + return (r >> 13U) & 0x7U; +} +static inline u32 fifo_engine_status_ctx_status_valid_v(void) +{ + return 0x00000001U; +} +static inline u32 fifo_engine_status_ctx_status_ctxsw_load_v(void) +{ 
+ return 0x00000005U; +} +static inline u32 fifo_engine_status_ctx_status_ctxsw_save_v(void) +{ + return 0x00000006U; +} +static inline u32 fifo_engine_status_ctx_status_ctxsw_switch_v(void) +{ + return 0x00000007U; +} +static inline u32 fifo_engine_status_next_id_v(u32 r) +{ + return (r >> 16U) & 0xfffU; +} +static inline u32 fifo_engine_status_next_id_type_v(u32 r) +{ + return (r >> 28U) & 0x1U; +} +static inline u32 fifo_engine_status_next_id_type_chid_v(void) +{ + return 0x00000000U; +} +static inline u32 fifo_engine_status_eng_reload_v(u32 r) +{ + return (r >> 29U) & 0x1U; +} +static inline u32 fifo_engine_status_faulted_v(u32 r) +{ + return (r >> 30U) & 0x1U; +} +static inline u32 fifo_engine_status_faulted_true_v(void) +{ + return 0x00000001U; +} +static inline u32 fifo_engine_status_engine_v(u32 r) +{ + return (r >> 31U) & 0x1U; +} +static inline u32 fifo_engine_status_engine_idle_v(void) +{ + return 0x00000000U; +} +static inline u32 fifo_engine_status_engine_busy_v(void) +{ + return 0x00000001U; +} +static inline u32 fifo_engine_status_ctxsw_v(u32 r) +{ + return (r >> 15U) & 0x1U; +} +static inline u32 fifo_engine_status_ctxsw_in_progress_v(void) +{ + return 0x00000001U; +} +static inline u32 fifo_engine_status_ctxsw_in_progress_f(void) +{ + return 0x8000U; +} +static inline u32 fifo_pbdma_status_r(u32 i) +{ + return 0x00003080U + i*4U; +} +static inline u32 fifo_pbdma_status__size_1_v(void) +{ + return 0x0000000eU; +} +static inline u32 fifo_pbdma_status_id_v(u32 r) +{ + return (r >> 0U) & 0xfffU; +} +static inline u32 fifo_pbdma_status_id_type_v(u32 r) +{ + return (r >> 12U) & 0x1U; +} +static inline u32 fifo_pbdma_status_id_type_chid_v(void) +{ + return 0x00000000U; +} +static inline u32 fifo_pbdma_status_id_type_tsgid_v(void) +{ + return 0x00000001U; +} +static inline u32 fifo_pbdma_status_chan_status_v(u32 r) +{ + return (r >> 13U) & 0x7U; +} +static inline u32 fifo_pbdma_status_chan_status_valid_v(void) +{ + return 0x00000001U; +} +static inline u32 
fifo_pbdma_status_chan_status_chsw_load_v(void) +{ + return 0x00000005U; +} +static inline u32 fifo_pbdma_status_chan_status_chsw_save_v(void) +{ + return 0x00000006U; +} +static inline u32 fifo_pbdma_status_chan_status_chsw_switch_v(void) +{ + return 0x00000007U; +} +static inline u32 fifo_pbdma_status_next_id_v(u32 r) +{ + return (r >> 16U) & 0xfffU; +} +static inline u32 fifo_pbdma_status_next_id_type_v(u32 r) +{ + return (r >> 28U) & 0x1U; +} +static inline u32 fifo_pbdma_status_next_id_type_chid_v(void) +{ + return 0x00000000U; +} +static inline u32 fifo_pbdma_status_chsw_v(u32 r) +{ + return (r >> 15U) & 0x1U; +} +static inline u32 fifo_pbdma_status_chsw_in_progress_v(void) +{ + return 0x00000001U; +} +static inline u32 fifo_cfg0_r(void) +{ + return 0x00002004U; +} +static inline u32 fifo_cfg0_num_pbdma_v(u32 r) +{ + return (r >> 0U) & 0xffU; +} +static inline u32 fifo_cfg0_pbdma_fault_id_v(u32 r) +{ + return (r >> 16U) & 0xffU; +} +static inline u32 fifo_fb_iface_r(void) +{ + return 0x000026f0U; +} +static inline u32 fifo_fb_iface_control_v(u32 r) +{ + return (r >> 0U) & 0x1U; +} +static inline u32 fifo_fb_iface_control_enable_f(void) +{ + return 0x1U; +} +static inline u32 fifo_fb_iface_status_v(u32 r) +{ + return (r >> 4U) & 0x1U; +} +static inline u32 fifo_fb_iface_status_enabled_f(void) +{ + return 0x10U; +} +#endif diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_flush_gv100.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_flush_gv100.h new file mode 100644 index 000000000..b60456261 --- /dev/null +++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_flush_gv100.h @@ -0,0 +1,187 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/* + * Function naming determines intended use: + * + * <x>_r(void) : Returns the offset for register <x>. + * + * <x>_o(void) : Returns the offset for element <x>. + * + * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. + * + * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. + * + * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted + * and masked to place it at field <y> of register <x>. This value + * can be |'d with others to produce a full register value for + * register <x>. + * + * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This + * value can be ~'d and then &'d to clear the value of field <y> for + * register <x>. + * + * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted + * to place it at field <y> of register <x>. This value can be |'d + * with others to produce a full register value for <x>. 
+ * + * __v(u32 r) : Returns the value of field from a full register + * value 'r' after being shifted to place its LSB at bit 0. + * This value is suitable for direct comparison with other unshifted + * values appropriate for use in field of register . + * + * ___v(void) : Returns the constant value for defined for + * field of register . This value is suitable for direct + * comparison with unshifted values appropriate for use in field + * of register . + */ +#ifndef _hw_flush_gv100_h_ +#define _hw_flush_gv100_h_ + +static inline u32 flush_l2_system_invalidate_r(void) +{ + return 0x00070004U; +} +static inline u32 flush_l2_system_invalidate_pending_v(u32 r) +{ + return (r >> 0U) & 0x1U; +} +static inline u32 flush_l2_system_invalidate_pending_busy_v(void) +{ + return 0x00000001U; +} +static inline u32 flush_l2_system_invalidate_pending_busy_f(void) +{ + return 0x1U; +} +static inline u32 flush_l2_system_invalidate_outstanding_v(u32 r) +{ + return (r >> 1U) & 0x1U; +} +static inline u32 flush_l2_system_invalidate_outstanding_true_v(void) +{ + return 0x00000001U; +} +static inline u32 flush_l2_flush_dirty_r(void) +{ + return 0x00070010U; +} +static inline u32 flush_l2_flush_dirty_pending_v(u32 r) +{ + return (r >> 0U) & 0x1U; +} +static inline u32 flush_l2_flush_dirty_pending_empty_v(void) +{ + return 0x00000000U; +} +static inline u32 flush_l2_flush_dirty_pending_empty_f(void) +{ + return 0x0U; +} +static inline u32 flush_l2_flush_dirty_pending_busy_v(void) +{ + return 0x00000001U; +} +static inline u32 flush_l2_flush_dirty_pending_busy_f(void) +{ + return 0x1U; +} +static inline u32 flush_l2_flush_dirty_outstanding_v(u32 r) +{ + return (r >> 1U) & 0x1U; +} +static inline u32 flush_l2_flush_dirty_outstanding_false_v(void) +{ + return 0x00000000U; +} +static inline u32 flush_l2_flush_dirty_outstanding_false_f(void) +{ + return 0x0U; +} +static inline u32 flush_l2_flush_dirty_outstanding_true_v(void) +{ + return 0x00000001U; +} +static inline u32 
flush_l2_clean_comptags_r(void) +{ + return 0x0007000cU; +} +static inline u32 flush_l2_clean_comptags_pending_v(u32 r) +{ + return (r >> 0U) & 0x1U; +} +static inline u32 flush_l2_clean_comptags_pending_empty_v(void) +{ + return 0x00000000U; +} +static inline u32 flush_l2_clean_comptags_pending_empty_f(void) +{ + return 0x0U; +} +static inline u32 flush_l2_clean_comptags_pending_busy_v(void) +{ + return 0x00000001U; +} +static inline u32 flush_l2_clean_comptags_pending_busy_f(void) +{ + return 0x1U; +} +static inline u32 flush_l2_clean_comptags_outstanding_v(u32 r) +{ + return (r >> 1U) & 0x1U; +} +static inline u32 flush_l2_clean_comptags_outstanding_false_v(void) +{ + return 0x00000000U; +} +static inline u32 flush_l2_clean_comptags_outstanding_false_f(void) +{ + return 0x0U; +} +static inline u32 flush_l2_clean_comptags_outstanding_true_v(void) +{ + return 0x00000001U; +} +static inline u32 flush_fb_flush_r(void) +{ + return 0x00070000U; +} +static inline u32 flush_fb_flush_pending_v(u32 r) +{ + return (r >> 0U) & 0x1U; +} +static inline u32 flush_fb_flush_pending_busy_v(void) +{ + return 0x00000001U; +} +static inline u32 flush_fb_flush_pending_busy_f(void) +{ + return 0x1U; +} +static inline u32 flush_fb_flush_outstanding_v(u32 r) +{ + return (r >> 1U) & 0x1U; +} +static inline u32 flush_fb_flush_outstanding_true_v(void) +{ + return 0x00000001U; +} +#endif diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_fuse_gv100.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_fuse_gv100.h new file mode 100644 index 000000000..f7eacd290 --- /dev/null +++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_fuse_gv100.h @@ -0,0 +1,143 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/* + * Function naming determines intended use: + * + * <x>_r(void) : Returns the offset for register <x>. + * + * <x>_o(void) : Returns the offset for element <x>. + * + * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. + * + * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. + * + * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted + * and masked to place it at field <y> of register <x>. This value + * can be |'d with others to produce a full register value for + * register <x>. + * + * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This + * value can be ~'d and then &'d to clear the value of field <y> for + * register <x>. + * + * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted + * to place it at field <y> of register <x>. This value can be |'d + * with others to produce a full register value for <x>. 
+ * + * __v(u32 r) : Returns the value of field from a full register + * value 'r' after being shifted to place its LSB at bit 0. + * This value is suitable for direct comparison with other unshifted + * values appropriate for use in field of register . + * + * ___v(void) : Returns the constant value for defined for + * field of register . This value is suitable for direct + * comparison with unshifted values appropriate for use in field + * of register . + */ +#ifndef _hw_fuse_gv100_h_ +#define _hw_fuse_gv100_h_ + +static inline u32 fuse_status_opt_tpc_gpc_r(u32 i) +{ + return 0x00021c38U + i*4U; +} +static inline u32 fuse_ctrl_opt_tpc_gpc_r(u32 i) +{ + return 0x00021838U + i*4U; +} +static inline u32 fuse_ctrl_opt_ram_svop_pdp_r(void) +{ + return 0x00021944U; +} +static inline u32 fuse_ctrl_opt_ram_svop_pdp_data_f(u32 v) +{ + return (v & 0xffU) << 0U; +} +static inline u32 fuse_ctrl_opt_ram_svop_pdp_data_m(void) +{ + return 0xffU << 0U; +} +static inline u32 fuse_ctrl_opt_ram_svop_pdp_data_v(u32 r) +{ + return (r >> 0U) & 0xffU; +} +static inline u32 fuse_ctrl_opt_ram_svop_pdp_override_r(void) +{ + return 0x00021948U; +} +static inline u32 fuse_ctrl_opt_ram_svop_pdp_override_data_f(u32 v) +{ + return (v & 0x1U) << 0U; +} +static inline u32 fuse_ctrl_opt_ram_svop_pdp_override_data_m(void) +{ + return 0x1U << 0U; +} +static inline u32 fuse_ctrl_opt_ram_svop_pdp_override_data_v(u32 r) +{ + return (r >> 0U) & 0x1U; +} +static inline u32 fuse_ctrl_opt_ram_svop_pdp_override_data_yes_f(void) +{ + return 0x1U; +} +static inline u32 fuse_ctrl_opt_ram_svop_pdp_override_data_no_f(void) +{ + return 0x0U; +} +static inline u32 fuse_status_opt_fbio_r(void) +{ + return 0x00021c14U; +} +static inline u32 fuse_status_opt_fbio_data_f(u32 v) +{ + return (v & 0xffffU) << 0U; +} +static inline u32 fuse_status_opt_fbio_data_m(void) +{ + return 0xffffU << 0U; +} +static inline u32 fuse_status_opt_fbio_data_v(u32 r) +{ + return (r >> 0U) & 0xffffU; +} +static inline u32 
fuse_status_opt_rop_l2_fbp_r(u32 i) +{ + return 0x00021d70U + i*4U; +} +static inline u32 fuse_status_opt_fbp_r(void) +{ + return 0x00021d38U; +} +static inline u32 fuse_status_opt_fbp_idx_v(u32 r, u32 i) +{ + return (r >> (0U + i*1U)) & 0x1U; +} +static inline u32 fuse_opt_ecc_en_r(void) +{ + return 0x00021228U; +} +static inline u32 fuse_opt_feature_fuses_override_disable_r(void) +{ + return 0x000213f0U; +} +#endif diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_gmmu_gv100.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_gmmu_gv100.h new file mode 100644 index 000000000..cf89f5f8f --- /dev/null +++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_gmmu_gv100.h @@ -0,0 +1,1287 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/* + * Function naming determines intended use: + * + * _r(void) : Returns the offset for register . 
+ * + * <x>_o(void) : Returns the offset for element <x>. + * + * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. + * + * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. + * + * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted + * and masked to place it at field <y> of register <x>. This value + * can be |'d with others to produce a full register value for + * register <x>. + * + * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This + * value can be ~'d and then &'d to clear the value of field <y> for + * register <x>. + * + * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted + * to place it at field <y> of register <x>. This value can be |'d + * with others to produce a full register value for <x>. + * + * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register + * <x> value 'r' after being shifted to place its LSB at bit 0. + * This value is suitable for direct comparison with other unshifted + * values appropriate for use in field <y> of register <x>. + * + * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for + * field <y> of register <x>. This value is suitable for direct + * comparison with unshifted values appropriate for use in field <y> + * of register <x>. 
+ */ +#ifndef _hw_gmmu_gv100_h_ +#define _hw_gmmu_gv100_h_ + +static inline u32 gmmu_new_pde_is_pte_w(void) +{ + return 0U; +} +static inline u32 gmmu_new_pde_is_pte_false_f(void) +{ + return 0x0U; +} +static inline u32 gmmu_new_pde_aperture_w(void) +{ + return 0U; +} +static inline u32 gmmu_new_pde_aperture_invalid_f(void) +{ + return 0x0U; +} +static inline u32 gmmu_new_pde_aperture_video_memory_f(void) +{ + return 0x2U; +} +static inline u32 gmmu_new_pde_aperture_sys_mem_coh_f(void) +{ + return 0x4U; +} +static inline u32 gmmu_new_pde_aperture_sys_mem_ncoh_f(void) +{ + return 0x6U; +} +static inline u32 gmmu_new_pde_address_sys_f(u32 v) +{ + return (v & 0xffffffU) << 8U; +} +static inline u32 gmmu_new_pde_address_sys_w(void) +{ + return 0U; +} +static inline u32 gmmu_new_pde_vol_w(void) +{ + return 0U; +} +static inline u32 gmmu_new_pde_vol_true_f(void) +{ + return 0x8U; +} +static inline u32 gmmu_new_pde_vol_false_f(void) +{ + return 0x0U; +} +static inline u32 gmmu_new_pde_address_shift_v(void) +{ + return 0x0000000cU; +} +static inline u32 gmmu_new_pde__size_v(void) +{ + return 0x00000008U; +} +static inline u32 gmmu_new_dual_pde_is_pte_w(void) +{ + return 0U; +} +static inline u32 gmmu_new_dual_pde_is_pte_false_f(void) +{ + return 0x0U; +} +static inline u32 gmmu_new_dual_pde_aperture_big_w(void) +{ + return 0U; +} +static inline u32 gmmu_new_dual_pde_aperture_big_invalid_f(void) +{ + return 0x0U; +} +static inline u32 gmmu_new_dual_pde_aperture_big_video_memory_f(void) +{ + return 0x2U; +} +static inline u32 gmmu_new_dual_pde_aperture_big_sys_mem_coh_f(void) +{ + return 0x4U; +} +static inline u32 gmmu_new_dual_pde_aperture_big_sys_mem_ncoh_f(void) +{ + return 0x6U; +} +static inline u32 gmmu_new_dual_pde_address_big_sys_f(u32 v) +{ + return (v & 0xfffffffU) << 4U; +} +static inline u32 gmmu_new_dual_pde_address_big_sys_w(void) +{ + return 0U; +} +static inline u32 gmmu_new_dual_pde_aperture_small_w(void) +{ + return 2U; +} +static inline u32 
gmmu_new_dual_pde_aperture_small_invalid_f(void) +{ + return 0x0U; +} +static inline u32 gmmu_new_dual_pde_aperture_small_video_memory_f(void) +{ + return 0x2U; +} +static inline u32 gmmu_new_dual_pde_aperture_small_sys_mem_coh_f(void) +{ + return 0x4U; +} +static inline u32 gmmu_new_dual_pde_aperture_small_sys_mem_ncoh_f(void) +{ + return 0x6U; +} +static inline u32 gmmu_new_dual_pde_vol_small_w(void) +{ + return 2U; +} +static inline u32 gmmu_new_dual_pde_vol_small_true_f(void) +{ + return 0x8U; +} +static inline u32 gmmu_new_dual_pde_vol_small_false_f(void) +{ + return 0x0U; +} +static inline u32 gmmu_new_dual_pde_vol_big_w(void) +{ + return 0U; +} +static inline u32 gmmu_new_dual_pde_vol_big_true_f(void) +{ + return 0x8U; +} +static inline u32 gmmu_new_dual_pde_vol_big_false_f(void) +{ + return 0x0U; +} +static inline u32 gmmu_new_dual_pde_address_small_sys_f(u32 v) +{ + return (v & 0xffffffU) << 8U; +} +static inline u32 gmmu_new_dual_pde_address_small_sys_w(void) +{ + return 2U; +} +static inline u32 gmmu_new_dual_pde_address_shift_v(void) +{ + return 0x0000000cU; +} +static inline u32 gmmu_new_dual_pde_address_big_shift_v(void) +{ + return 0x00000008U; +} +static inline u32 gmmu_new_dual_pde__size_v(void) +{ + return 0x00000010U; +} +static inline u32 gmmu_new_pte__size_v(void) +{ + return 0x00000008U; +} +static inline u32 gmmu_new_pte_valid_w(void) +{ + return 0U; +} +static inline u32 gmmu_new_pte_valid_true_f(void) +{ + return 0x1U; +} +static inline u32 gmmu_new_pte_valid_false_f(void) +{ + return 0x0U; +} +static inline u32 gmmu_new_pte_privilege_w(void) +{ + return 0U; +} +static inline u32 gmmu_new_pte_privilege_true_f(void) +{ + return 0x20U; +} +static inline u32 gmmu_new_pte_privilege_false_f(void) +{ + return 0x0U; +} +static inline u32 gmmu_new_pte_address_sys_f(u32 v) +{ + return (v & 0xffffffU) << 8U; +} +static inline u32 gmmu_new_pte_address_sys_w(void) +{ + return 0U; +} +static inline u32 gmmu_new_pte_address_vid_f(u32 v) +{ + return (v & 
0xffffffU) << 8U; +} +static inline u32 gmmu_new_pte_address_vid_w(void) +{ + return 0U; +} +static inline u32 gmmu_new_pte_vol_w(void) +{ + return 0U; +} +static inline u32 gmmu_new_pte_vol_true_f(void) +{ + return 0x8U; +} +static inline u32 gmmu_new_pte_vol_false_f(void) +{ + return 0x0U; +} +static inline u32 gmmu_new_pte_aperture_w(void) +{ + return 0U; +} +static inline u32 gmmu_new_pte_aperture_video_memory_f(void) +{ + return 0x0U; +} +static inline u32 gmmu_new_pte_aperture_sys_mem_coh_f(void) +{ + return 0x4U; +} +static inline u32 gmmu_new_pte_aperture_sys_mem_ncoh_f(void) +{ + return 0x6U; +} +static inline u32 gmmu_new_pte_read_only_w(void) +{ + return 0U; +} +static inline u32 gmmu_new_pte_read_only_true_f(void) +{ + return 0x40U; +} +static inline u32 gmmu_new_pte_comptagline_f(u32 v) +{ + return (v & 0x3ffffU) << 4U; +} +static inline u32 gmmu_new_pte_comptagline_w(void) +{ + return 1U; +} +static inline u32 gmmu_new_pte_kind_f(u32 v) +{ + return (v & 0xffU) << 24U; +} +static inline u32 gmmu_new_pte_kind_w(void) +{ + return 1U; +} +static inline u32 gmmu_new_pte_address_shift_v(void) +{ + return 0x0000000cU; +} +static inline u32 gmmu_pte_kind_f(u32 v) +{ + return (v & 0xffU) << 4U; +} +static inline u32 gmmu_pte_kind_w(void) +{ + return 1U; +} +static inline u32 gmmu_pte_kind_invalid_v(void) +{ + return 0x000000ffU; +} +static inline u32 gmmu_pte_kind_pitch_v(void) +{ + return 0x00000000U; +} +static inline u32 gmmu_pte_kind_z16_v(void) +{ + return 0x00000001U; +} +static inline u32 gmmu_pte_kind_z16_2c_v(void) +{ + return 0x00000002U; +} +static inline u32 gmmu_pte_kind_z16_ms2_2c_v(void) +{ + return 0x00000003U; +} +static inline u32 gmmu_pte_kind_z16_ms4_2c_v(void) +{ + return 0x00000004U; +} +static inline u32 gmmu_pte_kind_z16_ms8_2c_v(void) +{ + return 0x00000005U; +} +static inline u32 gmmu_pte_kind_z16_ms16_2c_v(void) +{ + return 0x00000006U; +} +static inline u32 gmmu_pte_kind_z16_2z_v(void) +{ + return 0x00000007U; +} +static inline u32 
gmmu_pte_kind_z16_ms2_2z_v(void) +{ + return 0x00000008U; +} +static inline u32 gmmu_pte_kind_z16_ms4_2z_v(void) +{ + return 0x00000009U; +} +static inline u32 gmmu_pte_kind_z16_ms8_2z_v(void) +{ + return 0x0000000aU; +} +static inline u32 gmmu_pte_kind_z16_ms16_2z_v(void) +{ + return 0x0000000bU; +} +static inline u32 gmmu_pte_kind_z16_2cz_v(void) +{ + return 0x00000036U; +} +static inline u32 gmmu_pte_kind_z16_ms2_2cz_v(void) +{ + return 0x00000037U; +} +static inline u32 gmmu_pte_kind_z16_ms4_2cz_v(void) +{ + return 0x00000038U; +} +static inline u32 gmmu_pte_kind_z16_ms8_2cz_v(void) +{ + return 0x00000039U; +} +static inline u32 gmmu_pte_kind_z16_ms16_2cz_v(void) +{ + return 0x0000005fU; +} +static inline u32 gmmu_pte_kind_s8z24_v(void) +{ + return 0x00000011U; +} +static inline u32 gmmu_pte_kind_s8z24_1z_v(void) +{ + return 0x00000012U; +} +static inline u32 gmmu_pte_kind_s8z24_ms2_1z_v(void) +{ + return 0x00000013U; +} +static inline u32 gmmu_pte_kind_s8z24_ms4_1z_v(void) +{ + return 0x00000014U; +} +static inline u32 gmmu_pte_kind_s8z24_ms8_1z_v(void) +{ + return 0x00000015U; +} +static inline u32 gmmu_pte_kind_s8z24_ms16_1z_v(void) +{ + return 0x00000016U; +} +static inline u32 gmmu_pte_kind_s8z24_2cz_v(void) +{ + return 0x00000017U; +} +static inline u32 gmmu_pte_kind_s8z24_ms2_2cz_v(void) +{ + return 0x00000018U; +} +static inline u32 gmmu_pte_kind_s8z24_ms4_2cz_v(void) +{ + return 0x00000019U; +} +static inline u32 gmmu_pte_kind_s8z24_ms8_2cz_v(void) +{ + return 0x0000001aU; +} +static inline u32 gmmu_pte_kind_s8z24_ms16_2cz_v(void) +{ + return 0x0000001bU; +} +static inline u32 gmmu_pte_kind_s8z24_2cs_v(void) +{ + return 0x0000001cU; +} +static inline u32 gmmu_pte_kind_s8z24_ms2_2cs_v(void) +{ + return 0x0000001dU; +} +static inline u32 gmmu_pte_kind_s8z24_ms4_2cs_v(void) +{ + return 0x0000001eU; +} +static inline u32 gmmu_pte_kind_s8z24_ms8_2cs_v(void) +{ + return 0x0000001fU; +} +static inline u32 gmmu_pte_kind_s8z24_ms16_2cs_v(void) +{ + return 
0x00000020U; +} +static inline u32 gmmu_pte_kind_s8z24_4cszv_v(void) +{ + return 0x00000021U; +} +static inline u32 gmmu_pte_kind_s8z24_ms2_4cszv_v(void) +{ + return 0x00000022U; +} +static inline u32 gmmu_pte_kind_s8z24_ms4_4cszv_v(void) +{ + return 0x00000023U; +} +static inline u32 gmmu_pte_kind_s8z24_ms8_4cszv_v(void) +{ + return 0x00000024U; +} +static inline u32 gmmu_pte_kind_s8z24_ms16_4cszv_v(void) +{ + return 0x00000025U; +} +static inline u32 gmmu_pte_kind_v8z24_ms4_vc12_v(void) +{ + return 0x00000026U; +} +static inline u32 gmmu_pte_kind_v8z24_ms4_vc4_v(void) +{ + return 0x00000027U; +} +static inline u32 gmmu_pte_kind_v8z24_ms8_vc8_v(void) +{ + return 0x00000028U; +} +static inline u32 gmmu_pte_kind_v8z24_ms8_vc24_v(void) +{ + return 0x00000029U; +} +static inline u32 gmmu_pte_kind_v8z24_ms4_vc12_1zv_v(void) +{ + return 0x0000002eU; +} +static inline u32 gmmu_pte_kind_v8z24_ms4_vc4_1zv_v(void) +{ + return 0x0000002fU; +} +static inline u32 gmmu_pte_kind_v8z24_ms8_vc8_1zv_v(void) +{ + return 0x00000030U; +} +static inline u32 gmmu_pte_kind_v8z24_ms8_vc24_1zv_v(void) +{ + return 0x00000031U; +} +static inline u32 gmmu_pte_kind_v8z24_ms4_vc12_2cs_v(void) +{ + return 0x00000032U; +} +static inline u32 gmmu_pte_kind_v8z24_ms4_vc4_2cs_v(void) +{ + return 0x00000033U; +} +static inline u32 gmmu_pte_kind_v8z24_ms8_vc8_2cs_v(void) +{ + return 0x00000034U; +} +static inline u32 gmmu_pte_kind_v8z24_ms8_vc24_2cs_v(void) +{ + return 0x00000035U; +} +static inline u32 gmmu_pte_kind_v8z24_ms4_vc12_2czv_v(void) +{ + return 0x0000003aU; +} +static inline u32 gmmu_pte_kind_v8z24_ms4_vc4_2czv_v(void) +{ + return 0x0000003bU; +} +static inline u32 gmmu_pte_kind_v8z24_ms8_vc8_2czv_v(void) +{ + return 0x0000003cU; +} +static inline u32 gmmu_pte_kind_v8z24_ms8_vc24_2czv_v(void) +{ + return 0x0000003dU; +} +static inline u32 gmmu_pte_kind_v8z24_ms4_vc12_2zv_v(void) +{ + return 0x0000003eU; +} +static inline u32 gmmu_pte_kind_v8z24_ms4_vc4_2zv_v(void) +{ + return 0x0000003fU; 
+} +static inline u32 gmmu_pte_kind_v8z24_ms8_vc8_2zv_v(void) +{ + return 0x00000040U; +} +static inline u32 gmmu_pte_kind_v8z24_ms8_vc24_2zv_v(void) +{ + return 0x00000041U; +} +static inline u32 gmmu_pte_kind_v8z24_ms4_vc12_4cszv_v(void) +{ + return 0x00000042U; +} +static inline u32 gmmu_pte_kind_v8z24_ms4_vc4_4cszv_v(void) +{ + return 0x00000043U; +} +static inline u32 gmmu_pte_kind_v8z24_ms8_vc8_4cszv_v(void) +{ + return 0x00000044U; +} +static inline u32 gmmu_pte_kind_v8z24_ms8_vc24_4cszv_v(void) +{ + return 0x00000045U; +} +static inline u32 gmmu_pte_kind_z24s8_v(void) +{ + return 0x00000046U; +} +static inline u32 gmmu_pte_kind_z24s8_1z_v(void) +{ + return 0x00000047U; +} +static inline u32 gmmu_pte_kind_z24s8_ms2_1z_v(void) +{ + return 0x00000048U; +} +static inline u32 gmmu_pte_kind_z24s8_ms4_1z_v(void) +{ + return 0x00000049U; +} +static inline u32 gmmu_pte_kind_z24s8_ms8_1z_v(void) +{ + return 0x0000004aU; +} +static inline u32 gmmu_pte_kind_z24s8_ms16_1z_v(void) +{ + return 0x0000004bU; +} +static inline u32 gmmu_pte_kind_z24s8_2cs_v(void) +{ + return 0x0000004cU; +} +static inline u32 gmmu_pte_kind_z24s8_ms2_2cs_v(void) +{ + return 0x0000004dU; +} +static inline u32 gmmu_pte_kind_z24s8_ms4_2cs_v(void) +{ + return 0x0000004eU; +} +static inline u32 gmmu_pte_kind_z24s8_ms8_2cs_v(void) +{ + return 0x0000004fU; +} +static inline u32 gmmu_pte_kind_z24s8_ms16_2cs_v(void) +{ + return 0x00000050U; +} +static inline u32 gmmu_pte_kind_z24s8_2cz_v(void) +{ + return 0x00000051U; +} +static inline u32 gmmu_pte_kind_z24s8_ms2_2cz_v(void) +{ + return 0x00000052U; +} +static inline u32 gmmu_pte_kind_z24s8_ms4_2cz_v(void) +{ + return 0x00000053U; +} +static inline u32 gmmu_pte_kind_z24s8_ms8_2cz_v(void) +{ + return 0x00000054U; +} +static inline u32 gmmu_pte_kind_z24s8_ms16_2cz_v(void) +{ + return 0x00000055U; +} +static inline u32 gmmu_pte_kind_z24s8_4cszv_v(void) +{ + return 0x00000056U; +} +static inline u32 gmmu_pte_kind_z24s8_ms2_4cszv_v(void) +{ + return 
0x00000057U; +} +static inline u32 gmmu_pte_kind_z24s8_ms4_4cszv_v(void) +{ + return 0x00000058U; +} +static inline u32 gmmu_pte_kind_z24s8_ms8_4cszv_v(void) +{ + return 0x00000059U; +} +static inline u32 gmmu_pte_kind_z24s8_ms16_4cszv_v(void) +{ + return 0x0000005aU; +} +static inline u32 gmmu_pte_kind_z24v8_ms4_vc12_v(void) +{ + return 0x0000005bU; +} +static inline u32 gmmu_pte_kind_z24v8_ms4_vc4_v(void) +{ + return 0x0000005cU; +} +static inline u32 gmmu_pte_kind_z24v8_ms8_vc8_v(void) +{ + return 0x0000005dU; +} +static inline u32 gmmu_pte_kind_z24v8_ms8_vc24_v(void) +{ + return 0x0000005eU; +} +static inline u32 gmmu_pte_kind_z24v8_ms4_vc12_1zv_v(void) +{ + return 0x00000063U; +} +static inline u32 gmmu_pte_kind_z24v8_ms4_vc4_1zv_v(void) +{ + return 0x00000064U; +} +static inline u32 gmmu_pte_kind_z24v8_ms8_vc8_1zv_v(void) +{ + return 0x00000065U; +} +static inline u32 gmmu_pte_kind_z24v8_ms8_vc24_1zv_v(void) +{ + return 0x00000066U; +} +static inline u32 gmmu_pte_kind_z24v8_ms4_vc12_2cs_v(void) +{ + return 0x00000067U; +} +static inline u32 gmmu_pte_kind_z24v8_ms4_vc4_2cs_v(void) +{ + return 0x00000068U; +} +static inline u32 gmmu_pte_kind_z24v8_ms8_vc8_2cs_v(void) +{ + return 0x00000069U; +} +static inline u32 gmmu_pte_kind_z24v8_ms8_vc24_2cs_v(void) +{ + return 0x0000006aU; +} +static inline u32 gmmu_pte_kind_z24v8_ms4_vc12_2czv_v(void) +{ + return 0x0000006fU; +} +static inline u32 gmmu_pte_kind_z24v8_ms4_vc4_2czv_v(void) +{ + return 0x00000070U; +} +static inline u32 gmmu_pte_kind_z24v8_ms8_vc8_2czv_v(void) +{ + return 0x00000071U; +} +static inline u32 gmmu_pte_kind_z24v8_ms8_vc24_2czv_v(void) +{ + return 0x00000072U; +} +static inline u32 gmmu_pte_kind_z24v8_ms4_vc12_2zv_v(void) +{ + return 0x00000073U; +} +static inline u32 gmmu_pte_kind_z24v8_ms4_vc4_2zv_v(void) +{ + return 0x00000074U; +} +static inline u32 gmmu_pte_kind_z24v8_ms8_vc8_2zv_v(void) +{ + return 0x00000075U; +} +static inline u32 gmmu_pte_kind_z24v8_ms8_vc24_2zv_v(void) +{ + return 
0x00000076U; +} +static inline u32 gmmu_pte_kind_z24v8_ms4_vc12_4cszv_v(void) +{ + return 0x00000077U; +} +static inline u32 gmmu_pte_kind_z24v8_ms4_vc4_4cszv_v(void) +{ + return 0x00000078U; +} +static inline u32 gmmu_pte_kind_z24v8_ms8_vc8_4cszv_v(void) +{ + return 0x00000079U; +} +static inline u32 gmmu_pte_kind_z24v8_ms8_vc24_4cszv_v(void) +{ + return 0x0000007aU; +} +static inline u32 gmmu_pte_kind_zf32_v(void) +{ + return 0x0000007bU; +} +static inline u32 gmmu_pte_kind_zf32_1z_v(void) +{ + return 0x0000007cU; +} +static inline u32 gmmu_pte_kind_zf32_ms2_1z_v(void) +{ + return 0x0000007dU; +} +static inline u32 gmmu_pte_kind_zf32_ms4_1z_v(void) +{ + return 0x0000007eU; +} +static inline u32 gmmu_pte_kind_zf32_ms8_1z_v(void) +{ + return 0x0000007fU; +} +static inline u32 gmmu_pte_kind_zf32_ms16_1z_v(void) +{ + return 0x00000080U; +} +static inline u32 gmmu_pte_kind_zf32_2cs_v(void) +{ + return 0x00000081U; +} +static inline u32 gmmu_pte_kind_zf32_ms2_2cs_v(void) +{ + return 0x00000082U; +} +static inline u32 gmmu_pte_kind_zf32_ms4_2cs_v(void) +{ + return 0x00000083U; +} +static inline u32 gmmu_pte_kind_zf32_ms8_2cs_v(void) +{ + return 0x00000084U; +} +static inline u32 gmmu_pte_kind_zf32_ms16_2cs_v(void) +{ + return 0x00000085U; +} +static inline u32 gmmu_pte_kind_zf32_2cz_v(void) +{ + return 0x00000086U; +} +static inline u32 gmmu_pte_kind_zf32_ms2_2cz_v(void) +{ + return 0x00000087U; +} +static inline u32 gmmu_pte_kind_zf32_ms4_2cz_v(void) +{ + return 0x00000088U; +} +static inline u32 gmmu_pte_kind_zf32_ms8_2cz_v(void) +{ + return 0x00000089U; +} +static inline u32 gmmu_pte_kind_zf32_ms16_2cz_v(void) +{ + return 0x0000008aU; +} +static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms4_vc12_v(void) +{ + return 0x0000008bU; +} +static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms4_vc4_v(void) +{ + return 0x0000008cU; +} +static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms8_vc8_v(void) +{ + return 0x0000008dU; +} +static inline u32 
gmmu_pte_kind_x8z24_x16v8s8_ms8_vc24_v(void) +{ + return 0x0000008eU; +} +static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms4_vc12_1cs_v(void) +{ + return 0x0000008fU; +} +static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms4_vc4_1cs_v(void) +{ + return 0x00000090U; +} +static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms8_vc8_1cs_v(void) +{ + return 0x00000091U; +} +static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms8_vc24_1cs_v(void) +{ + return 0x00000092U; +} +static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms4_vc12_1zv_v(void) +{ + return 0x00000097U; +} +static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms4_vc4_1zv_v(void) +{ + return 0x00000098U; +} +static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms8_vc8_1zv_v(void) +{ + return 0x00000099U; +} +static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms8_vc24_1zv_v(void) +{ + return 0x0000009aU; +} +static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms4_vc12_1czv_v(void) +{ + return 0x0000009bU; +} +static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms4_vc4_1czv_v(void) +{ + return 0x0000009cU; +} +static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms8_vc8_1czv_v(void) +{ + return 0x0000009dU; +} +static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms8_vc24_1czv_v(void) +{ + return 0x0000009eU; +} +static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms4_vc12_2cs_v(void) +{ + return 0x0000009fU; +} +static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms4_vc4_2cs_v(void) +{ + return 0x000000a0U; +} +static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms8_vc8_2cs_v(void) +{ + return 0x000000a1U; +} +static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms8_vc24_2cs_v(void) +{ + return 0x000000a2U; +} +static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms4_vc12_2cszv_v(void) +{ + return 0x000000a3U; +} +static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms4_vc4_2cszv_v(void) +{ + return 0x000000a4U; +} +static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms8_vc8_2cszv_v(void) +{ + return 0x000000a5U; +} +static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms8_vc24_2cszv_v(void) +{ + return 0x000000a6U; +} 
+static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms4_vc12_v(void) +{ + return 0x000000a7U; +} +static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms4_vc4_v(void) +{ + return 0x000000a8U; +} +static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms8_vc8_v(void) +{ + return 0x000000a9U; +} +static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms8_vc24_v(void) +{ + return 0x000000aaU; +} +static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms4_vc12_1cs_v(void) +{ + return 0x000000abU; +} +static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms4_vc4_1cs_v(void) +{ + return 0x000000acU; +} +static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms8_vc8_1cs_v(void) +{ + return 0x000000adU; +} +static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms8_vc24_1cs_v(void) +{ + return 0x000000aeU; +} +static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms4_vc12_1zv_v(void) +{ + return 0x000000b3U; +} +static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms4_vc4_1zv_v(void) +{ + return 0x000000b4U; +} +static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms8_vc8_1zv_v(void) +{ + return 0x000000b5U; +} +static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms8_vc24_1zv_v(void) +{ + return 0x000000b6U; +} +static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms4_vc12_1czv_v(void) +{ + return 0x000000b7U; +} +static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms4_vc4_1czv_v(void) +{ + return 0x000000b8U; +} +static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms8_vc8_1czv_v(void) +{ + return 0x000000b9U; +} +static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms8_vc24_1czv_v(void) +{ + return 0x000000baU; +} +static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms4_vc12_2cs_v(void) +{ + return 0x000000bbU; +} +static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms4_vc4_2cs_v(void) +{ + return 0x000000bcU; +} +static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms8_vc8_2cs_v(void) +{ + return 0x000000bdU; +} +static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms8_vc24_2cs_v(void) +{ + return 0x000000beU; +} +static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms4_vc12_2cszv_v(void) +{ + return 0x000000bfU; +} +static inline u32 
gmmu_pte_kind_zf32_x16v8s8_ms4_vc4_2cszv_v(void) +{ + return 0x000000c0U; +} +static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms8_vc8_2cszv_v(void) +{ + return 0x000000c1U; +} +static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms8_vc24_2cszv_v(void) +{ + return 0x000000c2U; +} +static inline u32 gmmu_pte_kind_zf32_x24s8_v(void) +{ + return 0x000000c3U; +} +static inline u32 gmmu_pte_kind_zf32_x24s8_1cs_v(void) +{ + return 0x000000c4U; +} +static inline u32 gmmu_pte_kind_zf32_x24s8_ms2_1cs_v(void) +{ + return 0x000000c5U; +} +static inline u32 gmmu_pte_kind_zf32_x24s8_ms4_1cs_v(void) +{ + return 0x000000c6U; +} +static inline u32 gmmu_pte_kind_zf32_x24s8_ms8_1cs_v(void) +{ + return 0x000000c7U; +} +static inline u32 gmmu_pte_kind_zf32_x24s8_ms16_1cs_v(void) +{ + return 0x000000c8U; +} +static inline u32 gmmu_pte_kind_zf32_x24s8_2cszv_v(void) +{ + return 0x000000ceU; +} +static inline u32 gmmu_pte_kind_zf32_x24s8_ms2_2cszv_v(void) +{ + return 0x000000cfU; +} +static inline u32 gmmu_pte_kind_zf32_x24s8_ms4_2cszv_v(void) +{ + return 0x000000d0U; +} +static inline u32 gmmu_pte_kind_zf32_x24s8_ms8_2cszv_v(void) +{ + return 0x000000d1U; +} +static inline u32 gmmu_pte_kind_zf32_x24s8_ms16_2cszv_v(void) +{ + return 0x000000d2U; +} +static inline u32 gmmu_pte_kind_zf32_x24s8_2cs_v(void) +{ + return 0x000000d3U; +} +static inline u32 gmmu_pte_kind_zf32_x24s8_ms2_2cs_v(void) +{ + return 0x000000d4U; +} +static inline u32 gmmu_pte_kind_zf32_x24s8_ms4_2cs_v(void) +{ + return 0x000000d5U; +} +static inline u32 gmmu_pte_kind_zf32_x24s8_ms8_2cs_v(void) +{ + return 0x000000d6U; +} +static inline u32 gmmu_pte_kind_zf32_x24s8_ms16_2cs_v(void) +{ + return 0x000000d7U; +} +static inline u32 gmmu_pte_kind_generic_16bx2_v(void) +{ + return 0x000000feU; +} +static inline u32 gmmu_pte_kind_c32_2c_v(void) +{ + return 0x000000d8U; +} +static inline u32 gmmu_pte_kind_c32_2cbr_v(void) +{ + return 0x000000d9U; +} +static inline u32 gmmu_pte_kind_c32_2cba_v(void) +{ + return 0x000000daU; +} +static 
inline u32 gmmu_pte_kind_c32_2cra_v(void) +{ + return 0x000000dbU; +} +static inline u32 gmmu_pte_kind_c32_2bra_v(void) +{ + return 0x000000dcU; +} +static inline u32 gmmu_pte_kind_c32_ms2_2c_v(void) +{ + return 0x000000ddU; +} +static inline u32 gmmu_pte_kind_c32_ms2_2cbr_v(void) +{ + return 0x000000deU; +} +static inline u32 gmmu_pte_kind_c32_ms2_4cbra_v(void) +{ + return 0x000000ccU; +} +static inline u32 gmmu_pte_kind_c32_ms4_2c_v(void) +{ + return 0x000000dfU; +} +static inline u32 gmmu_pte_kind_c32_ms4_2cbr_v(void) +{ + return 0x000000e0U; +} +static inline u32 gmmu_pte_kind_c32_ms4_2cba_v(void) +{ + return 0x000000e1U; +} +static inline u32 gmmu_pte_kind_c32_ms4_2cra_v(void) +{ + return 0x000000e2U; +} +static inline u32 gmmu_pte_kind_c32_ms4_2bra_v(void) +{ + return 0x000000e3U; +} +static inline u32 gmmu_pte_kind_c32_ms4_4cbra_v(void) +{ + return 0x0000002cU; +} +static inline u32 gmmu_pte_kind_c32_ms8_ms16_2c_v(void) +{ + return 0x000000e4U; +} +static inline u32 gmmu_pte_kind_c32_ms8_ms16_2cra_v(void) +{ + return 0x000000e5U; +} +static inline u32 gmmu_pte_kind_c64_2c_v(void) +{ + return 0x000000e6U; +} +static inline u32 gmmu_pte_kind_c64_2cbr_v(void) +{ + return 0x000000e7U; +} +static inline u32 gmmu_pte_kind_c64_2cba_v(void) +{ + return 0x000000e8U; +} +static inline u32 gmmu_pte_kind_c64_2cra_v(void) +{ + return 0x000000e9U; +} +static inline u32 gmmu_pte_kind_c64_2bra_v(void) +{ + return 0x000000eaU; +} +static inline u32 gmmu_pte_kind_c64_ms2_2c_v(void) +{ + return 0x000000ebU; +} +static inline u32 gmmu_pte_kind_c64_ms2_2cbr_v(void) +{ + return 0x000000ecU; +} +static inline u32 gmmu_pte_kind_c64_ms2_4cbra_v(void) +{ + return 0x000000cdU; +} +static inline u32 gmmu_pte_kind_c64_ms4_2c_v(void) +{ + return 0x000000edU; +} +static inline u32 gmmu_pte_kind_c64_ms4_2cbr_v(void) +{ + return 0x000000eeU; +} +static inline u32 gmmu_pte_kind_c64_ms4_2cba_v(void) +{ + return 0x000000efU; +} +static inline u32 gmmu_pte_kind_c64_ms4_2cra_v(void) +{ + return 
0x000000f0U; +} +static inline u32 gmmu_pte_kind_c64_ms4_2bra_v(void) +{ + return 0x000000f1U; +} +static inline u32 gmmu_pte_kind_c64_ms4_4cbra_v(void) +{ + return 0x0000002dU; +} +static inline u32 gmmu_pte_kind_c64_ms8_ms16_2c_v(void) +{ + return 0x000000f2U; +} +static inline u32 gmmu_pte_kind_c64_ms8_ms16_2cra_v(void) +{ + return 0x000000f3U; +} +static inline u32 gmmu_pte_kind_c128_2c_v(void) +{ + return 0x000000f4U; +} +static inline u32 gmmu_pte_kind_c128_2cr_v(void) +{ + return 0x000000f5U; +} +static inline u32 gmmu_pte_kind_c128_ms2_2c_v(void) +{ + return 0x000000f6U; +} +static inline u32 gmmu_pte_kind_c128_ms2_2cr_v(void) +{ + return 0x000000f7U; +} +static inline u32 gmmu_pte_kind_c128_ms4_2c_v(void) +{ + return 0x000000f8U; +} +static inline u32 gmmu_pte_kind_c128_ms4_2cr_v(void) +{ + return 0x000000f9U; +} +static inline u32 gmmu_pte_kind_c128_ms8_ms16_2c_v(void) +{ + return 0x000000faU; +} +static inline u32 gmmu_pte_kind_c128_ms8_ms16_2cr_v(void) +{ + return 0x000000fbU; +} +static inline u32 gmmu_pte_kind_x8c24_v(void) +{ + return 0x000000fcU; +} +static inline u32 gmmu_pte_kind_pitch_no_swizzle_v(void) +{ + return 0x000000fdU; +} +static inline u32 gmmu_pte_kind_smsked_message_v(void) +{ + return 0x000000caU; +} +static inline u32 gmmu_pte_kind_smhost_message_v(void) +{ + return 0x000000cbU; +} +static inline u32 gmmu_pte_kind_s8_v(void) +{ + return 0x0000002aU; +} +static inline u32 gmmu_pte_kind_s8_2s_v(void) +{ + return 0x0000002bU; +} +static inline u32 gmmu_fault_client_type_gpc_v(void) +{ + return 0x00000000U; +} +static inline u32 gmmu_fault_client_type_hub_v(void) +{ + return 0x00000001U; +} +static inline u32 gmmu_fault_type_unbound_inst_block_v(void) +{ + return 0x00000004U; +} +static inline u32 gmmu_fault_mmu_eng_id_bar2_v(void) +{ + return 0x00000005U; +} +static inline u32 gmmu_fault_mmu_eng_id_physical_v(void) +{ + return 0x0000001fU; +} +static inline u32 gmmu_fault_mmu_eng_id_ce0_v(void) +{ + return 0x0000000fU; +} +#endif diff 
--git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_gr_gv100.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_gr_gv100.h new file mode 100644 index 000000000..09cbc7934 --- /dev/null +++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_gr_gv100.h @@ -0,0 +1,3935 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/* + * Function naming determines intended use: + * + * _r(void) : Returns the offset for register . + * + * _o(void) : Returns the offset for element . + * + * _w(void) : Returns the word offset for word (4 byte) element . + * + * __s(void) : Returns size of field of register in bits. + * + * __f(u32 v) : Returns a value based on 'v' which has been shifted + * and masked to place it at field of register . This value + * can be |'d with others to produce a full register value for + * register . + * + * __m(void) : Returns a mask for field of register . 
This + * value can be ~'d and then &'d to clear the value of field for + * register . + * + * ___f(void) : Returns the constant value after being shifted + * to place it at field of register . This value can be |'d + * with others to produce a full register value for . + * + * __v(u32 r) : Returns the value of field from a full register + * value 'r' after being shifted to place its LSB at bit 0. + * This value is suitable for direct comparison with other unshifted + * values appropriate for use in field of register . + * + * ___v(void) : Returns the constant value for defined for + * field of register . This value is suitable for direct + * comparison with unshifted values appropriate for use in field + * of register . + */ +#ifndef _hw_gr_gv100_h_ +#define _hw_gr_gv100_h_ + +static inline u32 gr_intr_r(void) +{ + return 0x00400100U; +} +static inline u32 gr_intr_notify_pending_f(void) +{ + return 0x1U; +} +static inline u32 gr_intr_notify_reset_f(void) +{ + return 0x1U; +} +static inline u32 gr_intr_semaphore_pending_f(void) +{ + return 0x2U; +} +static inline u32 gr_intr_semaphore_reset_f(void) +{ + return 0x2U; +} +static inline u32 gr_intr_illegal_method_pending_f(void) +{ + return 0x10U; +} +static inline u32 gr_intr_illegal_method_reset_f(void) +{ + return 0x10U; +} +static inline u32 gr_intr_illegal_notify_pending_f(void) +{ + return 0x40U; +} +static inline u32 gr_intr_illegal_notify_reset_f(void) +{ + return 0x40U; +} +static inline u32 gr_intr_firmware_method_f(u32 v) +{ + return (v & 0x1U) << 8U; +} +static inline u32 gr_intr_firmware_method_pending_f(void) +{ + return 0x100U; +} +static inline u32 gr_intr_firmware_method_reset_f(void) +{ + return 0x100U; +} +static inline u32 gr_intr_illegal_class_pending_f(void) +{ + return 0x20U; +} +static inline u32 gr_intr_illegal_class_reset_f(void) +{ + return 0x20U; +} +static inline u32 gr_intr_fecs_error_pending_f(void) +{ + return 0x80000U; +} +static inline u32 gr_intr_fecs_error_reset_f(void) +{ + return 
0x80000U; +} +static inline u32 gr_intr_class_error_pending_f(void) +{ + return 0x100000U; +} +static inline u32 gr_intr_class_error_reset_f(void) +{ + return 0x100000U; +} +static inline u32 gr_intr_exception_pending_f(void) +{ + return 0x200000U; +} +static inline u32 gr_intr_exception_reset_f(void) +{ + return 0x200000U; +} +static inline u32 gr_fecs_intr_r(void) +{ + return 0x00400144U; +} +static inline u32 gr_class_error_r(void) +{ + return 0x00400110U; +} +static inline u32 gr_class_error_code_v(u32 r) +{ + return (r >> 0U) & 0xffffU; +} +static inline u32 gr_intr_nonstall_r(void) +{ + return 0x00400120U; +} +static inline u32 gr_intr_nonstall_trap_pending_f(void) +{ + return 0x2U; +} +static inline u32 gr_intr_en_r(void) +{ + return 0x0040013cU; +} +static inline u32 gr_exception_r(void) +{ + return 0x00400108U; +} +static inline u32 gr_exception_fe_m(void) +{ + return 0x1U << 0U; +} +static inline u32 gr_exception_gpc_m(void) +{ + return 0x1U << 24U; +} +static inline u32 gr_exception_memfmt_m(void) +{ + return 0x1U << 1U; +} +static inline u32 gr_exception_ds_m(void) +{ + return 0x1U << 4U; +} +static inline u32 gr_exception_sked_m(void) +{ + return 0x1U << 8U; +} +static inline u32 gr_exception1_r(void) +{ + return 0x00400118U; +} +static inline u32 gr_exception1_gpc_0_pending_f(void) +{ + return 0x1U; +} +static inline u32 gr_exception2_r(void) +{ + return 0x0040011cU; +} +static inline u32 gr_exception_en_r(void) +{ + return 0x00400138U; +} +static inline u32 gr_exception_en_fe_m(void) +{ + return 0x1U << 0U; +} +static inline u32 gr_exception_en_fe_enabled_f(void) +{ + return 0x1U; +} +static inline u32 gr_exception_en_gpc_m(void) +{ + return 0x1U << 24U; +} +static inline u32 gr_exception_en_gpc_enabled_f(void) +{ + return 0x1000000U; +} +static inline u32 gr_exception_en_memfmt_m(void) +{ + return 0x1U << 1U; +} +static inline u32 gr_exception_en_memfmt_enabled_f(void) +{ + return 0x2U; +} +static inline u32 gr_exception_en_ds_m(void) +{ + return 
0x1U << 4U; +} +static inline u32 gr_exception_en_ds_enabled_f(void) +{ + return 0x10U; +} +static inline u32 gr_exception1_en_r(void) +{ + return 0x00400130U; +} +static inline u32 gr_exception2_en_r(void) +{ + return 0x00400134U; +} +static inline u32 gr_gpfifo_ctl_r(void) +{ + return 0x00400500U; +} +static inline u32 gr_gpfifo_ctl_access_f(u32 v) +{ + return (v & 0x1U) << 0U; +} +static inline u32 gr_gpfifo_ctl_access_disabled_f(void) +{ + return 0x0U; +} +static inline u32 gr_gpfifo_ctl_access_enabled_f(void) +{ + return 0x1U; +} +static inline u32 gr_gpfifo_ctl_semaphore_access_f(u32 v) +{ + return (v & 0x1U) << 16U; +} +static inline u32 gr_gpfifo_ctl_semaphore_access_enabled_v(void) +{ + return 0x00000001U; +} +static inline u32 gr_gpfifo_ctl_semaphore_access_enabled_f(void) +{ + return 0x10000U; +} +static inline u32 gr_gpfifo_status_r(void) +{ + return 0x00400504U; +} +static inline u32 gr_trapped_addr_r(void) +{ + return 0x00400704U; +} +static inline u32 gr_trapped_addr_mthd_v(u32 r) +{ + return (r >> 2U) & 0xfffU; +} +static inline u32 gr_trapped_addr_subch_v(u32 r) +{ + return (r >> 16U) & 0x7U; +} +static inline u32 gr_trapped_addr_mme_generated_v(u32 r) +{ + return (r >> 20U) & 0x1U; +} +static inline u32 gr_trapped_addr_datahigh_v(u32 r) +{ + return (r >> 24U) & 0x1U; +} +static inline u32 gr_trapped_addr_priv_v(u32 r) +{ + return (r >> 28U) & 0x1U; +} +static inline u32 gr_trapped_addr_status_v(u32 r) +{ + return (r >> 31U) & 0x1U; +} +static inline u32 gr_trapped_data_lo_r(void) +{ + return 0x00400708U; +} +static inline u32 gr_trapped_data_hi_r(void) +{ + return 0x0040070cU; +} +static inline u32 gr_trapped_data_mme_r(void) +{ + return 0x00400710U; +} +static inline u32 gr_trapped_data_mme_pc_v(u32 r) +{ + return (r >> 0U) & 0xfffU; +} +static inline u32 gr_status_r(void) +{ + return 0x00400700U; +} +static inline u32 gr_status_fe_method_upper_v(u32 r) +{ + return (r >> 1U) & 0x1U; +} +static inline u32 gr_status_fe_method_lower_v(u32 r) +{ + 
return (r >> 2U) & 0x1U; +} +static inline u32 gr_status_fe_method_lower_idle_v(void) +{ + return 0x00000000U; +} +static inline u32 gr_status_fe_gi_v(u32 r) +{ + return (r >> 21U) & 0x1U; +} +static inline u32 gr_status_mask_r(void) +{ + return 0x00400610U; +} +static inline u32 gr_status_1_r(void) +{ + return 0x00400604U; +} +static inline u32 gr_status_2_r(void) +{ + return 0x00400608U; +} +static inline u32 gr_engine_status_r(void) +{ + return 0x0040060cU; +} +static inline u32 gr_engine_status_value_busy_f(void) +{ + return 0x1U; +} +static inline u32 gr_pri_be0_becs_be_exception_r(void) +{ + return 0x00410204U; +} +static inline u32 gr_pri_be0_becs_be_exception_en_r(void) +{ + return 0x00410208U; +} +static inline u32 gr_pri_gpc0_gpccs_gpc_exception_r(void) +{ + return 0x00502c90U; +} +static inline u32 gr_pri_gpc0_gpccs_gpc_exception_en_r(void) +{ + return 0x00502c94U; +} +static inline u32 gr_pri_gpc0_tpc0_tpccs_tpc_exception_r(void) +{ + return 0x00504508U; +} +static inline u32 gr_pri_gpc0_tpc0_tpccs_tpc_exception_en_r(void) +{ + return 0x0050450cU; +} +static inline u32 gr_activity_0_r(void) +{ + return 0x00400380U; +} +static inline u32 gr_activity_1_r(void) +{ + return 0x00400384U; +} +static inline u32 gr_activity_2_r(void) +{ + return 0x00400388U; +} +static inline u32 gr_activity_4_r(void) +{ + return 0x00400390U; +} +static inline u32 gr_activity_4_gpc0_s(void) +{ + return 3U; +} +static inline u32 gr_activity_4_gpc0_f(u32 v) +{ + return (v & 0x7U) << 0U; +} +static inline u32 gr_activity_4_gpc0_m(void) +{ + return 0x7U << 0U; +} +static inline u32 gr_activity_4_gpc0_v(u32 r) +{ + return (r >> 0U) & 0x7U; +} +static inline u32 gr_activity_4_gpc0_empty_v(void) +{ + return 0x00000000U; +} +static inline u32 gr_activity_4_gpc0_preempted_v(void) +{ + return 0x00000004U; +} +static inline u32 gr_pri_gpc0_gcc_dbg_r(void) +{ + return 0x00501000U; +} +static inline u32 gr_pri_gpcs_gcc_dbg_r(void) +{ + return 0x00419000U; +} +static inline u32 
/* NOTE(review): this span is a hard-wrapped unified-diff hunk ('+'-prefixed lines with lost newlines) adding what appears to be an auto-generated GR register header (gv11b family, per the Makefile changes in the same patch) — TODO confirm against the hw-header generator; the offset/mask constants below must not be hand-edited. */
gr_pri_gpcs_gcc_dbg_invalidate_m(void) +{ + return 0x1U << 1U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_cache_control_r(void) +{ + return 0x0050433cU; +} +static inline u32 gr_pri_gpcs_tpcs_sm_cache_control_r(void) +{ + return 0x00419b3cU; +} +static inline u32 gr_pri_gpcs_tpcs_sm_cache_control_invalidate_cache_m(void) +{ + return 0x1U << 0U; +} +static inline u32 gr_pri_sked_activity_r(void) +{ + return 0x00407054U; +} +static inline u32 gr_pri_gpc0_gpccs_gpc_activity0_r(void) +{ + return 0x00502c80U; +} +static inline u32 gr_pri_gpc0_gpccs_gpc_activity1_r(void) +{ + return 0x00502c84U; +} +static inline u32 gr_pri_gpc0_gpccs_gpc_activity2_r(void) +{ + return 0x00502c88U; +} +static inline u32 gr_pri_gpc0_gpccs_gpc_activity3_r(void) +{ + return 0x00502c8cU; +} +static inline u32 gr_pri_gpc0_tpc0_tpccs_tpc_activity_0_r(void) +{ + return 0x00504500U; +} +static inline u32 gr_pri_gpc0_tpc1_tpccs_tpc_activity_0_r(void) +{ + return 0x00504d00U; +} +static inline u32 gr_pri_gpc0_tpcs_tpccs_tpc_activity_0_r(void) +{ + return 0x00501d00U; +} +static inline u32 gr_pri_gpcs_gpccs_gpc_activity_0_r(void) +{ + return 0x0041ac80U; +} +static inline u32 gr_pri_gpcs_gpccs_gpc_activity_1_r(void) +{ + return 0x0041ac84U; +} +static inline u32 gr_pri_gpcs_gpccs_gpc_activity_2_r(void) +{ + return 0x0041ac88U; +} +static inline u32 gr_pri_gpcs_gpccs_gpc_activity_3_r(void) +{ + return 0x0041ac8cU; +} +static inline u32 gr_pri_gpcs_tpc0_tpccs_tpc_activity_0_r(void) +{ + return 0x0041c500U; +} +static inline u32 gr_pri_gpcs_tpc1_tpccs_tpc_activity_0_r(void) +{ + return 0x0041cd00U; +} +static inline u32 gr_pri_gpcs_tpcs_tpccs_tpc_activity_0_r(void) +{ + return 0x00419d00U; +} +static inline u32 gr_pri_be0_becs_be_activity0_r(void) +{ + return 0x00410200U; +} +static inline u32 gr_pri_be1_becs_be_activity0_r(void) +{ + return 0x00410600U; +} +static inline u32 gr_pri_bes_becs_be_activity0_r(void) +{ + return 0x00408a00U; +} +static inline u32 gr_pri_ds_mpipe_status_r(void) +{ + return 
0x00405858U; +} +static inline u32 gr_pri_fe_go_idle_info_r(void) +{ + return 0x00404194U; +} +static inline u32 gr_pri_fe_chip_def_info_r(void) +{ + return 0x00404030U; +} +static inline u32 gr_pri_fe_chip_def_info_max_veid_count_v(u32 r) +{ + return (r >> 0U) & 0xfffU; +} +static inline u32 gr_pri_fe_chip_def_info_max_veid_count_init_v(void) +{ + return 0x00000040U; +} +static inline u32 gr_pri_gpc0_tpc0_tex_m_tex_subunits_status_r(void) +{ + return 0x00504238U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_r(void) +{ + return 0x00504358U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_qrfdp0_m(void) +{ + return 0x1U << 0U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_qrfdp1_m(void) +{ + return 0x1U << 1U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_qrfdp2_m(void) +{ + return 0x1U << 2U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_qrfdp3_m(void) +{ + return 0x1U << 3U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_qrfdp4_m(void) +{ + return 0x1U << 4U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_qrfdp5_m(void) +{ + return 0x1U << 5U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_qrfdp6_m(void) +{ + return 0x1U << 6U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_qrfdp7_m(void) +{ + return 0x1U << 7U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_qrfdp0_m(void) +{ + return 0x1U << 8U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_qrfdp1_m(void) +{ + return 0x1U << 9U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_qrfdp2_m(void) +{ + return 0x1U << 10U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_qrfdp3_m(void) +{ + return 0x1U << 11U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_qrfdp4_m(void) +{ + return 0x1U << 
12U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_qrfdp5_m(void) +{ + return 0x1U << 13U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_qrfdp6_m(void) +{ + return 0x1U << 14U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_qrfdp7_m(void) +{ + return 0x1U << 15U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_total_counter_overflow_v(u32 r) +{ + return (r >> 24U) & 0x1U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_total_counter_overflow_v(u32 r) +{ + return (r >> 26U) & 0x1U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_reset_task_f(void) +{ + return 0x40000000U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_corrected_err_count_r(void) +{ + return 0x0050435cU; +} +static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_corrected_err_count_total_s(void) +{ + return 16U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_corrected_err_count_total_v(u32 r) +{ + return (r >> 0U) & 0xffffU; +} +static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_uncorrected_err_count_r(void) +{ + return 0x00504360U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_uncorrected_err_count_total_s(void) +{ + return 16U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_uncorrected_err_count_total_v(u32 r) +{ + return (r >> 0U) & 0xffffU; +} +static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_r(void) +{ + return 0x0050436cU; +} +static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_corrected_err_el1_0_m(void) +{ + return 0x1U << 0U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_corrected_err_el1_1_m(void) +{ + return 0x1U << 1U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_uncorrected_err_el1_0_m(void) +{ + return 0x1U << 2U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_uncorrected_err_el1_1_m(void) +{ + return 0x1U << 3U; +} +static inline u32 
gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_corrected_err_total_counter_overflow_v(u32 r) +{ + return (r >> 8U) & 0x1U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_uncorrected_err_total_counter_overflow_v(u32 r) +{ + return (r >> 10U) & 0x1U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_reset_task_f(void) +{ + return 0x40000000U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_corrected_err_count_r(void) +{ + return 0x00504370U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_corrected_err_count_total_s(void) +{ + return 16U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_corrected_err_count_total_v(u32 r) +{ + return (r >> 0U) & 0xffffU; +} +static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_uncorrected_err_count_r(void) +{ + return 0x00504374U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_uncorrected_err_count_total_s(void) +{ + return 16U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_uncorrected_err_count_total_v(u32 r) +{ + return (r >> 0U) & 0xffffU; +} +static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_r(void) +{ + return 0x00504638U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_corrected_err_warp_sm0_m(void) +{ + return 0x1U << 0U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_corrected_err_warp_sm1_m(void) +{ + return 0x1U << 1U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_corrected_err_barrier_sm0_m(void) +{ + return 0x1U << 2U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_corrected_err_barrier_sm1_m(void) +{ + return 0x1U << 3U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_uncorrected_err_warp_sm0_m(void) +{ + return 0x1U << 4U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_uncorrected_err_warp_sm1_m(void) +{ + return 0x1U << 5U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_uncorrected_err_barrier_sm0_m(void) +{ + return 0x1U << 6U; +} +static inline u32 
gr_pri_gpc0_tpc0_sm_cbu_ecc_status_uncorrected_err_barrier_sm1_m(void) +{ + return 0x1U << 7U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_corrected_err_total_counter_overflow_v(u32 r) +{ + return (r >> 16U) & 0x1U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_uncorrected_err_total_counter_overflow_v(u32 r) +{ + return (r >> 18U) & 0x1U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_reset_task_f(void) +{ + return 0x40000000U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_corrected_err_count_r(void) +{ + return 0x0050463cU; +} +static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_corrected_err_count_total_s(void) +{ + return 16U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_corrected_err_count_total_v(u32 r) +{ + return (r >> 0U) & 0xffffU; +} +static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_uncorrected_err_count_r(void) +{ + return 0x00504640U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_uncorrected_err_count_total_s(void) +{ + return 16U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_uncorrected_err_count_total_v(u32 r) +{ + return (r >> 0U) & 0xffffU; +} +static inline u32 gr_pri_gpc0_tpc0_tex_m_routing_r(void) +{ + return 0x005042c4U; +} +static inline u32 gr_pri_gpc0_tpc0_tex_m_routing_sel_default_f(void) +{ + return 0x0U; +} +static inline u32 gr_pri_gpc0_tpc0_tex_m_routing_sel_pipe0_f(void) +{ + return 0x1U; +} +static inline u32 gr_pri_gpc0_tpc0_tex_m_routing_sel_pipe1_f(void) +{ + return 0x2U; +} +static inline u32 gr_gpc0_tpc0_mpc_hww_esr_r(void) +{ + return 0x00504430U; +} +static inline u32 gr_gpc0_tpc0_mpc_hww_esr_reset_trigger_f(void) +{ + return 0x40000000U; +} +static inline u32 gr_gpc0_tpc0_mpc_hww_esr_info_r(void) +{ + return 0x00504434U; +} +static inline u32 gr_gpc0_tpc0_mpc_hww_esr_info_veid_v(u32 r) +{ + return (r >> 0U) & 0x3fU; +} +static inline u32 gr_pri_be0_crop_status1_r(void) +{ + return 0x00410134U; +} +static inline u32 gr_pri_bes_crop_status1_r(void) +{ + return 0x00408934U; +} +static 
inline u32 gr_pri_be0_zrop_status_r(void) +{ + return 0x00410048U; +} +static inline u32 gr_pri_be0_zrop_status2_r(void) +{ + return 0x0041004cU; +} +static inline u32 gr_pri_bes_zrop_status_r(void) +{ + return 0x00408848U; +} +static inline u32 gr_pri_bes_zrop_status2_r(void) +{ + return 0x0040884cU; +} +static inline u32 gr_pipe_bundle_address_r(void) +{ + return 0x00400200U; +} +static inline u32 gr_pipe_bundle_address_value_v(u32 r) +{ + return (r >> 0U) & 0xffffU; +} +static inline u32 gr_pipe_bundle_address_veid_f(u32 v) +{ + return (v & 0x3fU) << 20U; +} +static inline u32 gr_pipe_bundle_address_veid_w(void) +{ + return 0U; +} +static inline u32 gr_pipe_bundle_data_r(void) +{ + return 0x00400204U; +} +static inline u32 gr_pipe_bundle_config_r(void) +{ + return 0x00400208U; +} +static inline u32 gr_pipe_bundle_config_override_pipe_mode_disabled_f(void) +{ + return 0x0U; +} +static inline u32 gr_pipe_bundle_config_override_pipe_mode_enabled_f(void) +{ + return 0x80000000U; +} +static inline u32 gr_fe_hww_esr_r(void) +{ + return 0x00404000U; +} +static inline u32 gr_fe_hww_esr_reset_active_f(void) +{ + return 0x40000000U; +} +static inline u32 gr_fe_hww_esr_en_enable_f(void) +{ + return 0x80000000U; +} +static inline u32 gr_gpcs_tpcs_sms_hww_global_esr_report_mask_r(void) +{ + return 0x00419eacU; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_report_mask_r(void) +{ + return 0x0050472cU; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_report_mask_multiple_warp_errors_report_f(void) +{ + return 0x4U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_report_mask_bpt_int_report_f(void) +{ + return 0x10U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_report_mask_bpt_pause_report_f(void) +{ + return 0x20U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_report_mask_single_step_complete_report_f(void) +{ + return 0x40U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_report_mask_error_in_trap_report_f(void) +{ + return 0x100U; +} 
/* Accessor suffix convention used throughout this generated header: *_r() returns the register byte offset, *_m() a bit mask (0x1U << n), *_f(v) shifts a value into its field, *_v(r) extracts a field from a read value, *_s() gives the field width in bits. */
+static inline u32 gr_gpcs_tpcs_sms_hww_global_esr_r(void) +{ + return 0x00419eb4U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_r(void) +{ + return 0x00504734U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_bpt_int_m(void) +{ + return 0x1U << 4U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_bpt_int_pending_f(void) +{ + return 0x10U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_bpt_pause_m(void) +{ + return 0x1U << 5U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_bpt_pause_pending_f(void) +{ + return 0x20U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_single_step_complete_m(void) +{ + return 0x1U << 6U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_single_step_complete_pending_f(void) +{ + return 0x40U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_multiple_warp_errors_m(void) +{ + return 0x1U << 2U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_multiple_warp_errors_pending_f(void) +{ + return 0x4U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_error_in_trap_m(void) +{ + return 0x1U << 8U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_error_in_trap_pending_f(void) +{ + return 0x100U; +} +static inline u32 gr_fe_go_idle_timeout_r(void) +{ + return 0x00404154U; +} +static inline u32 gr_fe_go_idle_timeout_count_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 gr_fe_go_idle_timeout_count_disabled_f(void) +{ + return 0x0U; +} +static inline u32 gr_fe_go_idle_timeout_count_prod_f(void) +{ + return 0x1800U; +} +static inline u32 gr_fe_object_table_r(u32 i) +{ + return 0x00404200U + i*4U; +} +static inline u32 gr_fe_object_table_nvclass_v(u32 r) +{ + return (r >> 0U) & 0xffffU; +} +static inline u32 gr_fe_tpc_fs_r(u32 i) +{ + return 0x0040a200U + i*4U; +} +static inline u32 gr_pri_mme_shadow_raw_index_r(void) +{ + return 0x00404488U; +} +static inline u32 gr_pri_mme_shadow_raw_index_write_trigger_f(void) +{ + return 0x80000000U; +} +static inline u32 
gr_pri_mme_shadow_raw_data_r(void) +{ + return 0x0040448cU; +} +static inline u32 gr_mme_hww_esr_r(void) +{ + return 0x00404490U; +} +static inline u32 gr_mme_hww_esr_reset_active_f(void) +{ + return 0x40000000U; +} +static inline u32 gr_mme_hww_esr_en_enable_f(void) +{ + return 0x80000000U; +} +static inline u32 gr_memfmt_hww_esr_r(void) +{ + return 0x00404600U; +} +static inline u32 gr_memfmt_hww_esr_reset_active_f(void) +{ + return 0x40000000U; +} +static inline u32 gr_memfmt_hww_esr_en_enable_f(void) +{ + return 0x80000000U; +} +static inline u32 gr_fecs_cpuctl_r(void) +{ + return 0x00409100U; +} +static inline u32 gr_fecs_cpuctl_startcpu_f(u32 v) +{ + return (v & 0x1U) << 1U; +} +static inline u32 gr_fecs_cpuctl_alias_r(void) +{ + return 0x00409130U; +} +static inline u32 gr_fecs_cpuctl_alias_startcpu_f(u32 v) +{ + return (v & 0x1U) << 1U; +} +static inline u32 gr_fecs_dmactl_r(void) +{ + return 0x0040910cU; +} +static inline u32 gr_fecs_dmactl_require_ctx_f(u32 v) +{ + return (v & 0x1U) << 0U; +} +static inline u32 gr_fecs_dmactl_dmem_scrubbing_m(void) +{ + return 0x1U << 1U; +} +static inline u32 gr_fecs_dmactl_imem_scrubbing_m(void) +{ + return 0x1U << 2U; +} +static inline u32 gr_fecs_os_r(void) +{ + return 0x00409080U; +} +static inline u32 gr_fecs_idlestate_r(void) +{ + return 0x0040904cU; +} +static inline u32 gr_fecs_mailbox0_r(void) +{ + return 0x00409040U; +} +static inline u32 gr_fecs_mailbox1_r(void) +{ + return 0x00409044U; +} +static inline u32 gr_fecs_irqstat_r(void) +{ + return 0x00409008U; +} +static inline u32 gr_fecs_irqmode_r(void) +{ + return 0x0040900cU; +} +static inline u32 gr_fecs_irqmask_r(void) +{ + return 0x00409018U; +} +static inline u32 gr_fecs_irqdest_r(void) +{ + return 0x0040901cU; +} +static inline u32 gr_fecs_curctx_r(void) +{ + return 0x00409050U; +} +static inline u32 gr_fecs_nxtctx_r(void) +{ + return 0x00409054U; +} +static inline u32 gr_fecs_engctl_r(void) +{ + return 0x004090a4U; +} +static inline u32 
gr_fecs_debug1_r(void) +{ + return 0x00409090U; +} +static inline u32 gr_fecs_debuginfo_r(void) +{ + return 0x00409094U; +} +static inline u32 gr_fecs_icd_cmd_r(void) +{ + return 0x00409200U; +} +static inline u32 gr_fecs_icd_cmd_opc_s(void) +{ + return 4U; +} +static inline u32 gr_fecs_icd_cmd_opc_f(u32 v) +{ + return (v & 0xfU) << 0U; +} +static inline u32 gr_fecs_icd_cmd_opc_m(void) +{ + return 0xfU << 0U; +} +static inline u32 gr_fecs_icd_cmd_opc_v(u32 r) +{ + return (r >> 0U) & 0xfU; +} +static inline u32 gr_fecs_icd_cmd_opc_rreg_f(void) +{ + return 0x8U; +} +static inline u32 gr_fecs_icd_cmd_opc_rstat_f(void) +{ + return 0xeU; +} +static inline u32 gr_fecs_icd_cmd_idx_f(u32 v) +{ + return (v & 0x1fU) << 8U; +} +static inline u32 gr_fecs_icd_rdata_r(void) +{ + return 0x0040920cU; +} +static inline u32 gr_fecs_imemc_r(u32 i) +{ + return 0x00409180U + i*16U; +} +static inline u32 gr_fecs_imemc_offs_f(u32 v) +{ + return (v & 0x3fU) << 2U; +} +static inline u32 gr_fecs_imemc_blk_f(u32 v) +{ + return (v & 0xffU) << 8U; +} +static inline u32 gr_fecs_imemc_aincw_f(u32 v) +{ + return (v & 0x1U) << 24U; +} +static inline u32 gr_fecs_imemd_r(u32 i) +{ + return 0x00409184U + i*16U; +} +static inline u32 gr_fecs_imemt_r(u32 i) +{ + return 0x00409188U + i*16U; +} +static inline u32 gr_fecs_imemt_tag_f(u32 v) +{ + return (v & 0xffffU) << 0U; +} +static inline u32 gr_fecs_dmemc_r(u32 i) +{ + return 0x004091c0U + i*8U; +} +static inline u32 gr_fecs_dmemc_offs_s(void) +{ + return 6U; +} +static inline u32 gr_fecs_dmemc_offs_f(u32 v) +{ + return (v & 0x3fU) << 2U; +} +static inline u32 gr_fecs_dmemc_offs_m(void) +{ + return 0x3fU << 2U; +} +static inline u32 gr_fecs_dmemc_offs_v(u32 r) +{ + return (r >> 2U) & 0x3fU; +} +static inline u32 gr_fecs_dmemc_blk_f(u32 v) +{ + return (v & 0xffU) << 8U; +} +static inline u32 gr_fecs_dmemc_aincw_f(u32 v) +{ + return (v & 0x1U) << 24U; +} +static inline u32 gr_fecs_dmemd_r(u32 i) +{ + return 0x004091c4U + i*8U; +} +static inline u32 
gr_fecs_dmatrfbase_r(void) +{ + return 0x00409110U; +} +static inline u32 gr_fecs_dmatrfmoffs_r(void) +{ + return 0x00409114U; +} +static inline u32 gr_fecs_dmatrffboffs_r(void) +{ + return 0x0040911cU; +} +static inline u32 gr_fecs_dmatrfcmd_r(void) +{ + return 0x00409118U; +} +static inline u32 gr_fecs_dmatrfcmd_imem_f(u32 v) +{ + return (v & 0x1U) << 4U; +} +static inline u32 gr_fecs_dmatrfcmd_write_f(u32 v) +{ + return (v & 0x1U) << 5U; +} +static inline u32 gr_fecs_dmatrfcmd_size_f(u32 v) +{ + return (v & 0x7U) << 8U; +} +static inline u32 gr_fecs_dmatrfcmd_ctxdma_f(u32 v) +{ + return (v & 0x7U) << 12U; +} +static inline u32 gr_fecs_bootvec_r(void) +{ + return 0x00409104U; +} +static inline u32 gr_fecs_bootvec_vec_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 gr_fecs_falcon_hwcfg_r(void) +{ + return 0x00409108U; +} +static inline u32 gr_gpcs_gpccs_falcon_hwcfg_r(void) +{ + return 0x0041a108U; +} +static inline u32 gr_fecs_falcon_rm_r(void) +{ + return 0x00409084U; +} +static inline u32 gr_fecs_current_ctx_r(void) +{ + return 0x00409b00U; +} +static inline u32 gr_fecs_current_ctx_ptr_f(u32 v) +{ + return (v & 0xfffffffU) << 0U; +} +static inline u32 gr_fecs_current_ctx_ptr_v(u32 r) +{ + return (r >> 0U) & 0xfffffffU; +} +static inline u32 gr_fecs_current_ctx_target_s(void) +{ + return 2U; +} +static inline u32 gr_fecs_current_ctx_target_f(u32 v) +{ + return (v & 0x3U) << 28U; +} +static inline u32 gr_fecs_current_ctx_target_m(void) +{ + return 0x3U << 28U; +} +static inline u32 gr_fecs_current_ctx_target_v(u32 r) +{ + return (r >> 28U) & 0x3U; +} +static inline u32 gr_fecs_current_ctx_target_vid_mem_f(void) +{ + return 0x0U; +} +static inline u32 gr_fecs_current_ctx_target_sys_mem_coh_f(void) +{ + return 0x20000000U; +} +static inline u32 gr_fecs_current_ctx_target_sys_mem_ncoh_f(void) +{ + return 0x30000000U; +} +static inline u32 gr_fecs_current_ctx_valid_s(void) +{ + return 1U; +} +static inline u32 gr_fecs_current_ctx_valid_f(u32 v) +{ 
+ return (v & 0x1U) << 31U; +} +static inline u32 gr_fecs_current_ctx_valid_m(void) +{ + return 0x1U << 31U; +} +static inline u32 gr_fecs_current_ctx_valid_v(u32 r) +{ + return (r >> 31U) & 0x1U; +} +static inline u32 gr_fecs_current_ctx_valid_false_f(void) +{ + return 0x0U; +} +static inline u32 gr_fecs_method_data_r(void) +{ + return 0x00409500U; +} +static inline u32 gr_fecs_method_push_r(void) +{ + return 0x00409504U; +} +static inline u32 gr_fecs_method_push_adr_f(u32 v) +{ + return (v & 0xfffU) << 0U; +} +static inline u32 gr_fecs_method_push_adr_bind_pointer_v(void) +{ + return 0x00000003U; +} +static inline u32 gr_fecs_method_push_adr_bind_pointer_f(void) +{ + return 0x3U; +} +static inline u32 gr_fecs_method_push_adr_discover_image_size_v(void) +{ + return 0x00000010U; +} +static inline u32 gr_fecs_method_push_adr_wfi_golden_save_v(void) +{ + return 0x00000009U; +} +static inline u32 gr_fecs_method_push_adr_restore_golden_v(void) +{ + return 0x00000015U; +} +static inline u32 gr_fecs_method_push_adr_discover_zcull_image_size_v(void) +{ + return 0x00000016U; +} +static inline u32 gr_fecs_method_push_adr_discover_pm_image_size_v(void) +{ + return 0x00000025U; +} +static inline u32 gr_fecs_method_push_adr_discover_reglist_image_size_v(void) +{ + return 0x00000030U; +} +static inline u32 gr_fecs_method_push_adr_set_reglist_bind_instance_v(void) +{ + return 0x00000031U; +} +static inline u32 gr_fecs_method_push_adr_set_reglist_virtual_address_v(void) +{ + return 0x00000032U; +} +static inline u32 gr_fecs_method_push_adr_stop_ctxsw_v(void) +{ + return 0x00000038U; +} +static inline u32 gr_fecs_method_push_adr_start_ctxsw_v(void) +{ + return 0x00000039U; +} +static inline u32 gr_fecs_method_push_adr_set_watchdog_timeout_f(void) +{ + return 0x21U; +} +static inline u32 gr_fecs_method_push_adr_discover_preemption_image_size_v(void) +{ + return 0x0000001aU; +} +static inline u32 gr_fecs_method_push_adr_halt_pipeline_v(void) +{ + return 0x00000004U; +} +static 
inline u32 gr_fecs_method_push_adr_configure_interrupt_completion_option_v(void) +{ + return 0x0000003aU; +} +static inline u32 gr_fecs_host_int_status_r(void) +{ + return 0x00409c18U; +} +static inline u32 gr_fecs_host_int_status_fault_during_ctxsw_f(u32 v) +{ + return (v & 0x1U) << 16U; +} +static inline u32 gr_fecs_host_int_status_umimp_firmware_method_f(u32 v) +{ + return (v & 0x1U) << 17U; +} +static inline u32 gr_fecs_host_int_status_umimp_illegal_method_f(u32 v) +{ + return (v & 0x1U) << 18U; +} +static inline u32 gr_fecs_host_int_status_ctxsw_intr_f(u32 v) +{ + return (v & 0xffffU) << 0U; +} +static inline u32 gr_fecs_host_int_clear_r(void) +{ + return 0x00409c20U; +} +static inline u32 gr_fecs_host_int_clear_ctxsw_intr1_f(u32 v) +{ + return (v & 0x1U) << 1U; +} +static inline u32 gr_fecs_host_int_clear_ctxsw_intr1_clear_f(void) +{ + return 0x2U; +} +static inline u32 gr_fecs_host_int_enable_r(void) +{ + return 0x00409c24U; +} +static inline u32 gr_fecs_host_int_enable_ctxsw_intr1_enable_f(void) +{ + return 0x2U; +} +static inline u32 gr_fecs_host_int_enable_fault_during_ctxsw_enable_f(void) +{ + return 0x10000U; +} +static inline u32 gr_fecs_host_int_enable_umimp_firmware_method_enable_f(void) +{ + return 0x20000U; +} +static inline u32 gr_fecs_host_int_enable_umimp_illegal_method_enable_f(void) +{ + return 0x40000U; +} +static inline u32 gr_fecs_host_int_enable_watchdog_enable_f(void) +{ + return 0x80000U; +} +static inline u32 gr_fecs_ctxsw_reset_ctl_r(void) +{ + return 0x00409614U; +} +static inline u32 gr_fecs_ctxsw_reset_ctl_sys_halt_disabled_f(void) +{ + return 0x0U; +} +static inline u32 gr_fecs_ctxsw_reset_ctl_gpc_halt_disabled_f(void) +{ + return 0x0U; +} +static inline u32 gr_fecs_ctxsw_reset_ctl_be_halt_disabled_f(void) +{ + return 0x0U; +} +static inline u32 gr_fecs_ctxsw_reset_ctl_sys_engine_reset_disabled_f(void) +{ + return 0x10U; +} +static inline u32 gr_fecs_ctxsw_reset_ctl_gpc_engine_reset_disabled_f(void) +{ + return 0x20U; +} +static 
inline u32 gr_fecs_ctxsw_reset_ctl_be_engine_reset_disabled_f(void) +{ + return 0x40U; +} +static inline u32 gr_fecs_ctxsw_reset_ctl_sys_context_reset_enabled_f(void) +{ + return 0x0U; +} +static inline u32 gr_fecs_ctxsw_reset_ctl_sys_context_reset_disabled_f(void) +{ + return 0x100U; +} +static inline u32 gr_fecs_ctxsw_reset_ctl_gpc_context_reset_enabled_f(void) +{ + return 0x0U; +} +static inline u32 gr_fecs_ctxsw_reset_ctl_gpc_context_reset_disabled_f(void) +{ + return 0x200U; +} +static inline u32 gr_fecs_ctxsw_reset_ctl_be_context_reset_s(void) +{ + return 1U; +} +static inline u32 gr_fecs_ctxsw_reset_ctl_be_context_reset_f(u32 v) +{ + return (v & 0x1U) << 10U; +} +static inline u32 gr_fecs_ctxsw_reset_ctl_be_context_reset_m(void) +{ + return 0x1U << 10U; +} +static inline u32 gr_fecs_ctxsw_reset_ctl_be_context_reset_v(u32 r) +{ + return (r >> 10U) & 0x1U; +} +static inline u32 gr_fecs_ctxsw_reset_ctl_be_context_reset_enabled_f(void) +{ + return 0x0U; +} +static inline u32 gr_fecs_ctxsw_reset_ctl_be_context_reset_disabled_f(void) +{ + return 0x400U; +} +static inline u32 gr_fecs_ctx_state_store_major_rev_id_r(void) +{ + return 0x0040960cU; +} +static inline u32 gr_fecs_ctxsw_mailbox_r(u32 i) +{ + return 0x00409800U + i*4U; +} +static inline u32 gr_fecs_ctxsw_mailbox__size_1_v(void) +{ + return 0x00000010U; +} +static inline u32 gr_fecs_ctxsw_mailbox_value_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 gr_fecs_ctxsw_mailbox_value_pass_v(void) +{ + return 0x00000001U; +} +static inline u32 gr_fecs_ctxsw_mailbox_value_fail_v(void) +{ + return 0x00000002U; +} +static inline u32 gr_fecs_ctxsw_mailbox_set_r(u32 i) +{ + return 0x004098c0U + i*4U; +} +static inline u32 gr_fecs_ctxsw_mailbox_set_value_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 gr_fecs_ctxsw_mailbox_clear_r(u32 i) +{ + return 0x00409840U + i*4U; +} +static inline u32 gr_fecs_ctxsw_mailbox_clear_value_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static 
inline u32 gr_fecs_fs_r(void) +{ + return 0x00409604U; +} +static inline u32 gr_fecs_fs_num_available_gpcs_s(void) +{ + return 5U; +} +static inline u32 gr_fecs_fs_num_available_gpcs_f(u32 v) +{ + return (v & 0x1fU) << 0U; +} +static inline u32 gr_fecs_fs_num_available_gpcs_m(void) +{ + return 0x1fU << 0U; +} +static inline u32 gr_fecs_fs_num_available_gpcs_v(u32 r) +{ + return (r >> 0U) & 0x1fU; +} +static inline u32 gr_fecs_fs_num_available_fbps_s(void) +{ + return 5U; +} +static inline u32 gr_fecs_fs_num_available_fbps_f(u32 v) +{ + return (v & 0x1fU) << 16U; +} +static inline u32 gr_fecs_fs_num_available_fbps_m(void) +{ + return 0x1fU << 16U; +} +static inline u32 gr_fecs_fs_num_available_fbps_v(u32 r) +{ + return (r >> 16U) & 0x1fU; +} +static inline u32 gr_fecs_cfg_r(void) +{ + return 0x00409620U; +} +static inline u32 gr_fecs_cfg_imem_sz_v(u32 r) +{ + return (r >> 0U) & 0xffU; +} +static inline u32 gr_fecs_rc_lanes_r(void) +{ + return 0x00409880U; +} +static inline u32 gr_fecs_rc_lanes_num_chains_s(void) +{ + return 6U; +} +static inline u32 gr_fecs_rc_lanes_num_chains_f(u32 v) +{ + return (v & 0x3fU) << 0U; +} +static inline u32 gr_fecs_rc_lanes_num_chains_m(void) +{ + return 0x3fU << 0U; +} +static inline u32 gr_fecs_rc_lanes_num_chains_v(u32 r) +{ + return (r >> 0U) & 0x3fU; +} +static inline u32 gr_fecs_ctxsw_status_1_r(void) +{ + return 0x00409400U; +} +static inline u32 gr_fecs_ctxsw_status_1_arb_busy_s(void) +{ + return 1U; +} +static inline u32 gr_fecs_ctxsw_status_1_arb_busy_f(u32 v) +{ + return (v & 0x1U) << 12U; +} +static inline u32 gr_fecs_ctxsw_status_1_arb_busy_m(void) +{ + return 0x1U << 12U; +} +static inline u32 gr_fecs_ctxsw_status_1_arb_busy_v(u32 r) +{ + return (r >> 12U) & 0x1U; +} +static inline u32 gr_fecs_arb_ctx_adr_r(void) +{ + return 0x00409a24U; +} +static inline u32 gr_fecs_new_ctx_r(void) +{ + return 0x00409b04U; +} +static inline u32 gr_fecs_new_ctx_ptr_s(void) +{ + return 28U; +} +static inline u32 gr_fecs_new_ctx_ptr_f(u32 
v) +{ + return (v & 0xfffffffU) << 0U; +} +static inline u32 gr_fecs_new_ctx_ptr_m(void) +{ + return 0xfffffffU << 0U; +} +static inline u32 gr_fecs_new_ctx_ptr_v(u32 r) +{ + return (r >> 0U) & 0xfffffffU; +} +static inline u32 gr_fecs_new_ctx_target_s(void) +{ + return 2U; +} +static inline u32 gr_fecs_new_ctx_target_f(u32 v) +{ + return (v & 0x3U) << 28U; +} +static inline u32 gr_fecs_new_ctx_target_m(void) +{ + return 0x3U << 28U; +} +static inline u32 gr_fecs_new_ctx_target_v(u32 r) +{ + return (r >> 28U) & 0x3U; +} +static inline u32 gr_fecs_new_ctx_valid_s(void) +{ + return 1U; +} +static inline u32 gr_fecs_new_ctx_valid_f(u32 v) +{ + return (v & 0x1U) << 31U; +} +static inline u32 gr_fecs_new_ctx_valid_m(void) +{ + return 0x1U << 31U; +} +static inline u32 gr_fecs_new_ctx_valid_v(u32 r) +{ + return (r >> 31U) & 0x1U; +} +static inline u32 gr_fecs_arb_ctx_ptr_r(void) +{ + return 0x00409a0cU; +} +static inline u32 gr_fecs_arb_ctx_ptr_ptr_s(void) +{ + return 28U; +} +static inline u32 gr_fecs_arb_ctx_ptr_ptr_f(u32 v) +{ + return (v & 0xfffffffU) << 0U; +} +static inline u32 gr_fecs_arb_ctx_ptr_ptr_m(void) +{ + return 0xfffffffU << 0U; +} +static inline u32 gr_fecs_arb_ctx_ptr_ptr_v(u32 r) +{ + return (r >> 0U) & 0xfffffffU; +} +static inline u32 gr_fecs_arb_ctx_ptr_target_s(void) +{ + return 2U; +} +static inline u32 gr_fecs_arb_ctx_ptr_target_f(u32 v) +{ + return (v & 0x3U) << 28U; +} +static inline u32 gr_fecs_arb_ctx_ptr_target_m(void) +{ + return 0x3U << 28U; +} +static inline u32 gr_fecs_arb_ctx_ptr_target_v(u32 r) +{ + return (r >> 28U) & 0x3U; +} +static inline u32 gr_fecs_arb_ctx_cmd_r(void) +{ + return 0x00409a10U; +} +static inline u32 gr_fecs_arb_ctx_cmd_cmd_s(void) +{ + return 5U; +} +static inline u32 gr_fecs_arb_ctx_cmd_cmd_f(u32 v) +{ + return (v & 0x1fU) << 0U; +} +static inline u32 gr_fecs_arb_ctx_cmd_cmd_m(void) +{ + return 0x1fU << 0U; +} +static inline u32 gr_fecs_arb_ctx_cmd_cmd_v(u32 r) +{ + return (r >> 0U) & 0x1fU; +} +static inline u32 
gr_fecs_ctxsw_status_fe_0_r(void) +{ + return 0x00409c00U; +} +static inline u32 gr_gpc0_gpccs_ctxsw_status_gpc_0_r(void) +{ + return 0x00502c04U; +} +static inline u32 gr_gpc0_gpccs_ctxsw_status_1_r(void) +{ + return 0x00502400U; +} +static inline u32 gr_fecs_ctxsw_idlestate_r(void) +{ + return 0x00409420U; +} +static inline u32 gr_fecs_feature_override_ecc_r(void) +{ + return 0x00409658U; +} +static inline u32 gr_fecs_feature_override_ecc_sm_lrf_override_v(u32 r) +{ + return (r >> 3U) & 0x1U; +} +static inline u32 gr_fecs_feature_override_ecc_ltc_override_v(u32 r) +{ + return (r >> 15U) & 0x1U; +} +static inline u32 gr_fecs_feature_override_ecc_sm_lrf_v(u32 r) +{ + return (r >> 0U) & 0x1U; +} +static inline u32 gr_fecs_feature_override_ecc_ltc_v(u32 r) +{ + return (r >> 12U) & 0x1U; +} +static inline u32 gr_gpc0_gpccs_ctxsw_idlestate_r(void) +{ + return 0x00502420U; +} +static inline u32 gr_rstr2d_gpc_map_r(u32 i) +{ + return 0x0040780cU + i*4U; +} +static inline u32 gr_rstr2d_map_table_cfg_r(void) +{ + return 0x004078bcU; +} +static inline u32 gr_rstr2d_map_table_cfg_row_offset_f(u32 v) +{ + return (v & 0xffU) << 0U; +} +static inline u32 gr_rstr2d_map_table_cfg_num_entries_f(u32 v) +{ + return (v & 0xffU) << 8U; +} +static inline u32 gr_pd_hww_esr_r(void) +{ + return 0x00406018U; +} +static inline u32 gr_pd_hww_esr_reset_active_f(void) +{ + return 0x40000000U; +} +static inline u32 gr_pd_hww_esr_en_enable_f(void) +{ + return 0x80000000U; +} +static inline u32 gr_pd_num_tpc_per_gpc_r(u32 i) +{ + return 0x00406028U + i*4U; +} +static inline u32 gr_pd_num_tpc_per_gpc__size_1_v(void) +{ + return 0x00000004U; +} +static inline u32 gr_pd_num_tpc_per_gpc_count0_f(u32 v) +{ + return (v & 0xfU) << 0U; +} +static inline u32 gr_pd_num_tpc_per_gpc_count1_f(u32 v) +{ + return (v & 0xfU) << 4U; +} +static inline u32 gr_pd_num_tpc_per_gpc_count2_f(u32 v) +{ + return (v & 0xfU) << 8U; +} +static inline u32 gr_pd_num_tpc_per_gpc_count3_f(u32 v) +{ + return (v & 0xfU) << 12U; +} 
/* gr_pd_*, gr_ds_* (ZBC color/depth tables, HWW report masks) and gr_scc_* accessors; every function body is a compile-time constant or pure shift/mask, so these inline to immediates with no runtime cost. */
+static inline u32 gr_pd_num_tpc_per_gpc_count4_f(u32 v) +{ + return (v & 0xfU) << 16U; +} +static inline u32 gr_pd_num_tpc_per_gpc_count5_f(u32 v) +{ + return (v & 0xfU) << 20U; +} +static inline u32 gr_pd_num_tpc_per_gpc_count6_f(u32 v) +{ + return (v & 0xfU) << 24U; +} +static inline u32 gr_pd_num_tpc_per_gpc_count7_f(u32 v) +{ + return (v & 0xfU) << 28U; +} +static inline u32 gr_pd_ab_dist_cfg0_r(void) +{ + return 0x004064c0U; +} +static inline u32 gr_pd_ab_dist_cfg0_timeslice_enable_en_f(void) +{ + return 0x80000000U; +} +static inline u32 gr_pd_ab_dist_cfg0_timeslice_enable_dis_f(void) +{ + return 0x0U; +} +static inline u32 gr_pd_ab_dist_cfg1_r(void) +{ + return 0x004064c4U; +} +static inline u32 gr_pd_ab_dist_cfg1_max_batches_init_f(void) +{ + return 0xffffU; +} +static inline u32 gr_pd_ab_dist_cfg1_max_output_f(u32 v) +{ + return (v & 0xffffU) << 16U; +} +static inline u32 gr_pd_ab_dist_cfg1_max_output_granularity_v(void) +{ + return 0x00000080U; +} +static inline u32 gr_pd_ab_dist_cfg2_r(void) +{ + return 0x004064c8U; +} +static inline u32 gr_pd_ab_dist_cfg2_token_limit_f(u32 v) +{ + return (v & 0x1fffU) << 0U; +} +static inline u32 gr_pd_ab_dist_cfg2_token_limit_init_v(void) +{ + return 0x00001680U; +} +static inline u32 gr_pd_ab_dist_cfg2_state_limit_f(u32 v) +{ + return (v & 0x1fffU) << 16U; +} +static inline u32 gr_pd_ab_dist_cfg2_state_limit_scc_bundle_granularity_v(void) +{ + return 0x00000020U; +} +static inline u32 gr_pd_ab_dist_cfg2_state_limit_min_gpm_fifo_depths_v(void) +{ + return 0x00001680U; +} +static inline u32 gr_pd_dist_skip_table_r(u32 i) +{ + return 0x004064d0U + i*4U; +} +static inline u32 gr_pd_dist_skip_table__size_1_v(void) +{ + return 0x00000008U; +} +static inline u32 gr_pd_dist_skip_table_gpc_4n0_mask_f(u32 v) +{ + return (v & 0xffU) << 0U; +} +static inline u32 gr_pd_dist_skip_table_gpc_4n1_mask_f(u32 v) +{ + return (v & 0xffU) << 8U; +} +static inline u32 gr_pd_dist_skip_table_gpc_4n2_mask_f(u32 v) +{ + return (v & 0xffU) << 
16U; +} +static inline u32 gr_pd_dist_skip_table_gpc_4n3_mask_f(u32 v) +{ + return (v & 0xffU) << 24U; +} +static inline u32 gr_ds_debug_r(void) +{ + return 0x00405800U; +} +static inline u32 gr_ds_debug_timeslice_mode_disable_f(void) +{ + return 0x0U; +} +static inline u32 gr_ds_debug_timeslice_mode_enable_f(void) +{ + return 0x8000000U; +} +static inline u32 gr_ds_zbc_color_r_r(void) +{ + return 0x00405804U; +} +static inline u32 gr_ds_zbc_color_r_val_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 gr_ds_zbc_color_g_r(void) +{ + return 0x00405808U; +} +static inline u32 gr_ds_zbc_color_g_val_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 gr_ds_zbc_color_b_r(void) +{ + return 0x0040580cU; +} +static inline u32 gr_ds_zbc_color_b_val_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 gr_ds_zbc_color_a_r(void) +{ + return 0x00405810U; +} +static inline u32 gr_ds_zbc_color_a_val_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 gr_ds_zbc_color_fmt_r(void) +{ + return 0x00405814U; +} +static inline u32 gr_ds_zbc_color_fmt_val_f(u32 v) +{ + return (v & 0x7fU) << 0U; +} +static inline u32 gr_ds_zbc_color_fmt_val_invalid_f(void) +{ + return 0x0U; +} +static inline u32 gr_ds_zbc_color_fmt_val_zero_v(void) +{ + return 0x00000001U; +} +static inline u32 gr_ds_zbc_color_fmt_val_unorm_one_v(void) +{ + return 0x00000002U; +} +static inline u32 gr_ds_zbc_color_fmt_val_rf32_gf32_bf32_af32_v(void) +{ + return 0x00000004U; +} +static inline u32 gr_ds_zbc_color_fmt_val_a8_b8_g8_r8_v(void) +{ + return 0x00000028U; +} +static inline u32 gr_ds_zbc_z_r(void) +{ + return 0x00405818U; +} +static inline u32 gr_ds_zbc_z_val_s(void) +{ + return 32U; +} +static inline u32 gr_ds_zbc_z_val_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 gr_ds_zbc_z_val_m(void) +{ + return 0xffffffffU << 0U; +} +static inline u32 gr_ds_zbc_z_val_v(u32 r) +{ + return (r >> 0U) & 0xffffffffU; +} +static inline u32 
gr_ds_zbc_z_val__init_v(void) +{ + return 0x00000000U; +} +static inline u32 gr_ds_zbc_z_val__init_f(void) +{ + return 0x0U; +} +static inline u32 gr_ds_zbc_z_fmt_r(void) +{ + return 0x0040581cU; +} +static inline u32 gr_ds_zbc_z_fmt_val_f(u32 v) +{ + return (v & 0x1U) << 0U; +} +static inline u32 gr_ds_zbc_z_fmt_val_invalid_f(void) +{ + return 0x0U; +} +static inline u32 gr_ds_zbc_z_fmt_val_fp32_v(void) +{ + return 0x00000001U; +} +static inline u32 gr_ds_zbc_tbl_index_r(void) +{ + return 0x00405820U; +} +static inline u32 gr_ds_zbc_tbl_index_val_f(u32 v) +{ + return (v & 0xfU) << 0U; +} +static inline u32 gr_ds_zbc_tbl_ld_r(void) +{ + return 0x00405824U; +} +static inline u32 gr_ds_zbc_tbl_ld_select_c_f(void) +{ + return 0x0U; +} +static inline u32 gr_ds_zbc_tbl_ld_select_z_f(void) +{ + return 0x1U; +} +static inline u32 gr_ds_zbc_tbl_ld_action_write_f(void) +{ + return 0x0U; +} +static inline u32 gr_ds_zbc_tbl_ld_trigger_active_f(void) +{ + return 0x4U; +} +static inline u32 gr_ds_tga_constraintlogic_beta_r(void) +{ + return 0x00405830U; +} +static inline u32 gr_ds_tga_constraintlogic_beta_cbsize_f(u32 v) +{ + return (v & 0x3fffffU) << 0U; +} +static inline u32 gr_ds_tga_constraintlogic_alpha_r(void) +{ + return 0x0040585cU; +} +static inline u32 gr_ds_tga_constraintlogic_alpha_cbsize_f(u32 v) +{ + return (v & 0xffffU) << 0U; +} +static inline u32 gr_ds_hww_esr_r(void) +{ + return 0x00405840U; +} +static inline u32 gr_ds_hww_esr_reset_s(void) +{ + return 1U; +} +static inline u32 gr_ds_hww_esr_reset_f(u32 v) +{ + return (v & 0x1U) << 30U; +} +static inline u32 gr_ds_hww_esr_reset_m(void) +{ + return 0x1U << 30U; +} +static inline u32 gr_ds_hww_esr_reset_v(u32 r) +{ + return (r >> 30U) & 0x1U; +} +static inline u32 gr_ds_hww_esr_reset_task_v(void) +{ + return 0x00000001U; +} +static inline u32 gr_ds_hww_esr_reset_task_f(void) +{ + return 0x40000000U; +} +static inline u32 gr_ds_hww_esr_en_enabled_f(void) +{ + return 0x80000000U; +} +static inline u32 
gr_ds_hww_esr_2_r(void) +{ + return 0x00405848U; +} +static inline u32 gr_ds_hww_esr_2_reset_s(void) +{ + return 1U; +} +static inline u32 gr_ds_hww_esr_2_reset_f(u32 v) +{ + return (v & 0x1U) << 30U; +} +static inline u32 gr_ds_hww_esr_2_reset_m(void) +{ + return 0x1U << 30U; +} +static inline u32 gr_ds_hww_esr_2_reset_v(u32 r) +{ + return (r >> 30U) & 0x1U; +} +static inline u32 gr_ds_hww_esr_2_reset_task_v(void) +{ + return 0x00000001U; +} +static inline u32 gr_ds_hww_esr_2_reset_task_f(void) +{ + return 0x40000000U; +} +static inline u32 gr_ds_hww_esr_2_en_enabled_f(void) +{ + return 0x80000000U; +} +static inline u32 gr_ds_hww_report_mask_r(void) +{ + return 0x00405844U; +} +static inline u32 gr_ds_hww_report_mask_sph0_err_report_f(void) +{ + return 0x1U; +} +static inline u32 gr_ds_hww_report_mask_sph1_err_report_f(void) +{ + return 0x2U; +} +static inline u32 gr_ds_hww_report_mask_sph2_err_report_f(void) +{ + return 0x4U; +} +static inline u32 gr_ds_hww_report_mask_sph3_err_report_f(void) +{ + return 0x8U; +} +static inline u32 gr_ds_hww_report_mask_sph4_err_report_f(void) +{ + return 0x10U; +} +static inline u32 gr_ds_hww_report_mask_sph5_err_report_f(void) +{ + return 0x20U; +} +static inline u32 gr_ds_hww_report_mask_sph6_err_report_f(void) +{ + return 0x40U; +} +static inline u32 gr_ds_hww_report_mask_sph7_err_report_f(void) +{ + return 0x80U; +} +static inline u32 gr_ds_hww_report_mask_sph8_err_report_f(void) +{ + return 0x100U; +} +static inline u32 gr_ds_hww_report_mask_sph9_err_report_f(void) +{ + return 0x200U; +} +static inline u32 gr_ds_hww_report_mask_sph10_err_report_f(void) +{ + return 0x400U; +} +static inline u32 gr_ds_hww_report_mask_sph11_err_report_f(void) +{ + return 0x800U; +} +static inline u32 gr_ds_hww_report_mask_sph12_err_report_f(void) +{ + return 0x1000U; +} +static inline u32 gr_ds_hww_report_mask_sph13_err_report_f(void) +{ + return 0x2000U; +} +static inline u32 gr_ds_hww_report_mask_sph14_err_report_f(void) +{ + return 
0x4000U; +} +static inline u32 gr_ds_hww_report_mask_sph15_err_report_f(void) +{ + return 0x8000U; +} +static inline u32 gr_ds_hww_report_mask_sph16_err_report_f(void) +{ + return 0x10000U; +} +static inline u32 gr_ds_hww_report_mask_sph17_err_report_f(void) +{ + return 0x20000U; +} +static inline u32 gr_ds_hww_report_mask_sph18_err_report_f(void) +{ + return 0x40000U; +} +static inline u32 gr_ds_hww_report_mask_sph19_err_report_f(void) +{ + return 0x80000U; +} +static inline u32 gr_ds_hww_report_mask_sph20_err_report_f(void) +{ + return 0x100000U; +} +static inline u32 gr_ds_hww_report_mask_sph21_err_report_f(void) +{ + return 0x200000U; +} +static inline u32 gr_ds_hww_report_mask_sph22_err_report_f(void) +{ + return 0x400000U; +} +static inline u32 gr_ds_hww_report_mask_sph23_err_report_f(void) +{ + return 0x800000U; +} +static inline u32 gr_ds_hww_report_mask_2_r(void) +{ + return 0x0040584cU; +} +static inline u32 gr_ds_hww_report_mask_2_sph24_err_report_f(void) +{ + return 0x1U; +} +static inline u32 gr_ds_num_tpc_per_gpc_r(u32 i) +{ + return 0x00405870U + i*4U; +} +static inline u32 gr_scc_bundle_cb_base_r(void) +{ + return 0x00408004U; +} +static inline u32 gr_scc_bundle_cb_base_addr_39_8_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 gr_scc_bundle_cb_base_addr_39_8_align_bits_v(void) +{ + return 0x00000008U; +} +static inline u32 gr_scc_bundle_cb_size_r(void) +{ + return 0x00408008U; +} +static inline u32 gr_scc_bundle_cb_size_div_256b_f(u32 v) +{ + return (v & 0x7ffU) << 0U; +} +static inline u32 gr_scc_bundle_cb_size_div_256b__prod_v(void) +{ + return 0x00000030U; +} +static inline u32 gr_scc_bundle_cb_size_div_256b_byte_granularity_v(void) +{ + return 0x00000100U; +} +static inline u32 gr_scc_bundle_cb_size_valid_false_v(void) +{ + return 0x00000000U; +} +static inline u32 gr_scc_bundle_cb_size_valid_false_f(void) +{ + return 0x0U; +} +static inline u32 gr_scc_bundle_cb_size_valid_true_f(void) +{ + return 0x80000000U; +} +static 
inline u32 gr_scc_pagepool_base_r(void) +{ + return 0x0040800cU; +} +static inline u32 gr_scc_pagepool_base_addr_39_8_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 gr_scc_pagepool_base_addr_39_8_align_bits_v(void) +{ + return 0x00000008U; +} +static inline u32 gr_scc_pagepool_r(void) +{ + return 0x00408010U; +} +static inline u32 gr_scc_pagepool_total_pages_f(u32 v) +{ + return (v & 0x3ffU) << 0U; +} +static inline u32 gr_scc_pagepool_total_pages_hwmax_v(void) +{ + return 0x00000000U; +} +static inline u32 gr_scc_pagepool_total_pages_hwmax_value_v(void) +{ + return 0x00000200U; +} +static inline u32 gr_scc_pagepool_total_pages_byte_granularity_v(void) +{ + return 0x00000100U; +} +static inline u32 gr_scc_pagepool_max_valid_pages_s(void) +{ + return 10U; +} +static inline u32 gr_scc_pagepool_max_valid_pages_f(u32 v) +{ + return (v & 0x3ffU) << 10U; +} +static inline u32 gr_scc_pagepool_max_valid_pages_m(void) +{ + return 0x3ffU << 10U; +} +static inline u32 gr_scc_pagepool_max_valid_pages_v(u32 r) +{ + return (r >> 10U) & 0x3ffU; +} +static inline u32 gr_scc_pagepool_valid_true_f(void) +{ + return 0x80000000U; +} +static inline u32 gr_scc_init_r(void) +{ + return 0x0040802cU; +} +static inline u32 gr_scc_init_ram_trigger_f(void) +{ + return 0x1U; +} +static inline u32 gr_scc_hww_esr_r(void) +{ + return 0x00408030U; +} +static inline u32 gr_scc_hww_esr_reset_active_f(void) +{ + return 0x40000000U; +} +static inline u32 gr_scc_hww_esr_en_enable_f(void) +{ + return 0x80000000U; +} +static inline u32 gr_sked_hww_esr_r(void) +{ + return 0x00407020U; +} +static inline u32 gr_sked_hww_esr_reset_active_f(void) +{ + return 0x40000000U; +} +static inline u32 gr_sked_hww_esr_en_r(void) +{ + return 0x00407024U; +} +static inline u32 gr_sked_hww_esr_en_skedcheck18_l1_config_too_small_m(void) +{ + return 0x1U << 25U; +} +static inline u32 gr_sked_hww_esr_en_skedcheck18_l1_config_too_small_disabled_f(void) +{ + return 0x0U; +} +static inline u32 
gr_sked_hww_esr_en_skedcheck18_l1_config_too_small_enabled_f(void) +{ + return 0x2000000U; +} +static inline u32 gr_cwd_fs_r(void) +{ + return 0x00405b00U; +} +static inline u32 gr_cwd_fs_num_gpcs_f(u32 v) +{ + return (v & 0xffU) << 0U; +} +static inline u32 gr_cwd_fs_num_tpcs_f(u32 v) +{ + return (v & 0xffU) << 8U; +} +static inline u32 gr_cwd_gpc_tpc_id_r(u32 i) +{ + return 0x00405b60U + i*4U; +} +static inline u32 gr_cwd_gpc_tpc_id_tpc0_s(void) +{ + return 4U; +} +static inline u32 gr_cwd_gpc_tpc_id_tpc0_f(u32 v) +{ + return (v & 0xfU) << 0U; +} +static inline u32 gr_cwd_gpc_tpc_id_gpc0_s(void) +{ + return 4U; +} +static inline u32 gr_cwd_gpc_tpc_id_gpc0_f(u32 v) +{ + return (v & 0xfU) << 4U; +} +static inline u32 gr_cwd_gpc_tpc_id_tpc1_f(u32 v) +{ + return (v & 0xfU) << 8U; +} +static inline u32 gr_cwd_sm_id_r(u32 i) +{ + return 0x00405ba0U + i*4U; +} +static inline u32 gr_cwd_sm_id__size_1_v(void) +{ + return 0x00000010U; +} +static inline u32 gr_cwd_sm_id_tpc0_f(u32 v) +{ + return (v & 0xffU) << 0U; +} +static inline u32 gr_cwd_sm_id_tpc1_f(u32 v) +{ + return (v & 0xffU) << 8U; +} +static inline u32 gr_gpc0_fs_gpc_r(void) +{ + return 0x00502608U; +} +static inline u32 gr_gpc0_fs_gpc_num_available_tpcs_v(u32 r) +{ + return (r >> 0U) & 0x1fU; +} +static inline u32 gr_gpc0_fs_gpc_num_available_zculls_v(u32 r) +{ + return (r >> 16U) & 0x1fU; +} +static inline u32 gr_gpc0_cfg_r(void) +{ + return 0x00502620U; +} +static inline u32 gr_gpc0_cfg_imem_sz_v(u32 r) +{ + return (r >> 0U) & 0xffU; +} +static inline u32 gr_gpccs_rc_lanes_r(void) +{ + return 0x00502880U; +} +static inline u32 gr_gpccs_rc_lanes_num_chains_s(void) +{ + return 6U; +} +static inline u32 gr_gpccs_rc_lanes_num_chains_f(u32 v) +{ + return (v & 0x3fU) << 0U; +} +static inline u32 gr_gpccs_rc_lanes_num_chains_m(void) +{ + return 0x3fU << 0U; +} +static inline u32 gr_gpccs_rc_lanes_num_chains_v(u32 r) +{ + return (r >> 0U) & 0x3fU; +} +static inline u32 gr_gpccs_rc_lane_size_r(void) +{ + return 
0x00502910U; +} +static inline u32 gr_gpccs_rc_lane_size_v_s(void) +{ + return 24U; +} +static inline u32 gr_gpccs_rc_lane_size_v_f(u32 v) +{ + return (v & 0xffffffU) << 0U; +} +static inline u32 gr_gpccs_rc_lane_size_v_m(void) +{ + return 0xffffffU << 0U; +} +static inline u32 gr_gpccs_rc_lane_size_v_v(u32 r) +{ + return (r >> 0U) & 0xffffffU; +} +static inline u32 gr_gpccs_rc_lane_size_v_0_v(void) +{ + return 0x00000000U; +} +static inline u32 gr_gpccs_rc_lane_size_v_0_f(void) +{ + return 0x0U; +} +static inline u32 gr_gpc0_zcull_fs_r(void) +{ + return 0x00500910U; +} +static inline u32 gr_gpc0_zcull_fs_num_sms_f(u32 v) +{ + return (v & 0x1ffU) << 0U; +} +static inline u32 gr_gpc0_zcull_fs_num_active_banks_f(u32 v) +{ + return (v & 0xfU) << 16U; +} +static inline u32 gr_gpc0_zcull_ram_addr_r(void) +{ + return 0x00500914U; +} +static inline u32 gr_gpc0_zcull_ram_addr_tiles_per_hypertile_row_per_gpc_f(u32 v) +{ + return (v & 0xfU) << 0U; +} +static inline u32 gr_gpc0_zcull_ram_addr_row_offset_f(u32 v) +{ + return (v & 0xfU) << 8U; +} +static inline u32 gr_gpc0_zcull_sm_num_rcp_r(void) +{ + return 0x00500918U; +} +static inline u32 gr_gpc0_zcull_sm_num_rcp_conservative_f(u32 v) +{ + return (v & 0xffffffU) << 0U; +} +static inline u32 gr_gpc0_zcull_sm_num_rcp_conservative__max_v(void) +{ + return 0x00800000U; +} +static inline u32 gr_gpc0_zcull_total_ram_size_r(void) +{ + return 0x00500920U; +} +static inline u32 gr_gpc0_zcull_total_ram_size_num_aliquots_f(u32 v) +{ + return (v & 0xffffU) << 0U; +} +static inline u32 gr_gpc0_zcull_zcsize_r(u32 i) +{ + return 0x00500a04U + i*32U; +} +static inline u32 gr_gpc0_zcull_zcsize_height_subregion__multiple_v(void) +{ + return 0x00000040U; +} +static inline u32 gr_gpc0_zcull_zcsize_width_subregion__multiple_v(void) +{ + return 0x00000010U; +} +static inline u32 gr_gpc0_gpm_pd_sm_id_r(u32 i) +{ + return 0x00500c10U + i*4U; +} +static inline u32 gr_gpc0_gpm_pd_sm_id_id_f(u32 v) +{ + return (v & 0xffU) << 0U; +} +static inline 
u32 gr_gpc0_gpm_pd_pes_tpc_id_mask_r(u32 i) +{ + return 0x00500c30U + i*4U; +} +static inline u32 gr_gpc0_gpm_pd_pes_tpc_id_mask_mask_v(u32 r) +{ + return (r >> 0U) & 0xffU; +} +static inline u32 gr_gpc0_tpc0_pe_cfg_smid_r(void) +{ + return 0x00504088U; +} +static inline u32 gr_gpc0_tpc0_pe_cfg_smid_value_f(u32 v) +{ + return (v & 0xffffU) << 0U; +} +static inline u32 gr_gpc0_tpc0_sm_cfg_r(void) +{ + return 0x00504608U; +} +static inline u32 gr_gpc0_tpc0_sm_cfg_tpc_id_f(u32 v) +{ + return (v & 0xffffU) << 0U; +} +static inline u32 gr_gpc0_tpc0_sm_cfg_tpc_id_v(u32 r) +{ + return (r >> 0U) & 0xffffU; +} +static inline u32 gr_gpc0_tpc0_sm_arch_r(void) +{ + return 0x00504330U; +} +static inline u32 gr_gpc0_tpc0_sm_arch_warp_count_v(u32 r) +{ + return (r >> 0U) & 0xffU; +} +static inline u32 gr_gpc0_tpc0_sm_arch_spa_version_v(u32 r) +{ + return (r >> 8U) & 0xfffU; +} +static inline u32 gr_gpc0_tpc0_sm_arch_sm_version_v(u32 r) +{ + return (r >> 20U) & 0xfffU; +} +static inline u32 gr_gpc0_ppc0_pes_vsc_strem_r(void) +{ + return 0x00503018U; +} +static inline u32 gr_gpc0_ppc0_pes_vsc_strem_master_pe_m(void) +{ + return 0x1U << 0U; +} +static inline u32 gr_gpc0_ppc0_pes_vsc_strem_master_pe_true_f(void) +{ + return 0x1U; +} +static inline u32 gr_gpc0_ppc0_cbm_beta_cb_size_r(void) +{ + return 0x005030c0U; +} +static inline u32 gr_gpc0_ppc0_cbm_beta_cb_size_v_f(u32 v) +{ + return (v & 0x3fffffU) << 0U; +} +static inline u32 gr_gpc0_ppc0_cbm_beta_cb_size_v_m(void) +{ + return 0x3fffffU << 0U; +} +static inline u32 gr_gpc0_ppc0_cbm_beta_cb_size_v_default_v(void) +{ + return 0x00000480U; +} +static inline u32 gr_gpc0_ppc0_cbm_beta_cb_size_v_gfxp_v(void) +{ + return 0x00000d10U; +} +static inline u32 gr_gpc0_ppc0_cbm_beta_cb_size_v_granularity_v(void) +{ + return 0x00000020U; +} +static inline u32 gr_gpc0_ppc0_cbm_beta_cb_offset_r(void) +{ + return 0x005030f4U; +} +static inline u32 gr_gpc0_ppc0_cbm_alpha_cb_size_r(void) +{ + return 0x005030e4U; +} +static inline u32 
gr_gpc0_ppc0_cbm_alpha_cb_size_v_f(u32 v) +{ + return (v & 0xffffU) << 0U; +} +static inline u32 gr_gpc0_ppc0_cbm_alpha_cb_size_v_m(void) +{ + return 0xffffU << 0U; +} +static inline u32 gr_gpc0_ppc0_cbm_alpha_cb_size_v_default_v(void) +{ + return 0x00000800U; +} +static inline u32 gr_gpc0_ppc0_cbm_alpha_cb_size_v_granularity_v(void) +{ + return 0x00000020U; +} +static inline u32 gr_gpc0_ppc0_cbm_alpha_cb_offset_r(void) +{ + return 0x005030f8U; +} +static inline u32 gr_gpc0_ppc0_cbm_beta_steady_state_cb_size_r(void) +{ + return 0x005030f0U; +} +static inline u32 gr_gpc0_ppc0_cbm_beta_steady_state_cb_size_v_f(u32 v) +{ + return (v & 0x3fffffU) << 0U; +} +static inline u32 gr_gpc0_ppc0_cbm_beta_steady_state_cb_size_v_default_v(void) +{ + return 0x00000480U; +} +static inline u32 gr_gpcs_tpcs_tex_rm_cb_0_r(void) +{ + return 0x00419e00U; +} +static inline u32 gr_gpcs_tpcs_tex_rm_cb_0_base_addr_43_12_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_r(void) +{ + return 0x00419e04U; +} +static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_size_div_128b_s(void) +{ + return 21U; +} +static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_size_div_128b_f(u32 v) +{ + return (v & 0x1fffffU) << 0U; +} +static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_size_div_128b_m(void) +{ + return 0x1fffffU << 0U; +} +static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_size_div_128b_v(u32 r) +{ + return (r >> 0U) & 0x1fffffU; +} +static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_size_div_128b_granularity_f(void) +{ + return 0x80U; +} +static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_valid_s(void) +{ + return 1U; +} +static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_valid_f(u32 v) +{ + return (v & 0x1U) << 31U; +} +static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_valid_m(void) +{ + return 0x1U << 31U; +} +static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_valid_v(u32 r) +{ + return (r >> 31U) & 0x1U; +} +static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_valid_true_f(void) +{ + return 0x80000000U; +} +static inline u32 
gr_gpccs_falcon_addr_r(void) +{ + return 0x0041a0acU; +} +static inline u32 gr_gpccs_falcon_addr_lsb_s(void) +{ + return 6U; +} +static inline u32 gr_gpccs_falcon_addr_lsb_f(u32 v) +{ + return (v & 0x3fU) << 0U; +} +static inline u32 gr_gpccs_falcon_addr_lsb_m(void) +{ + return 0x3fU << 0U; +} +static inline u32 gr_gpccs_falcon_addr_lsb_v(u32 r) +{ + return (r >> 0U) & 0x3fU; +} +static inline u32 gr_gpccs_falcon_addr_lsb_init_v(void) +{ + return 0x00000000U; +} +static inline u32 gr_gpccs_falcon_addr_lsb_init_f(void) +{ + return 0x0U; +} +static inline u32 gr_gpccs_falcon_addr_msb_s(void) +{ + return 6U; +} +static inline u32 gr_gpccs_falcon_addr_msb_f(u32 v) +{ + return (v & 0x3fU) << 6U; +} +static inline u32 gr_gpccs_falcon_addr_msb_m(void) +{ + return 0x3fU << 6U; +} +static inline u32 gr_gpccs_falcon_addr_msb_v(u32 r) +{ + return (r >> 6U) & 0x3fU; +} +static inline u32 gr_gpccs_falcon_addr_msb_init_v(void) +{ + return 0x00000000U; +} +static inline u32 gr_gpccs_falcon_addr_msb_init_f(void) +{ + return 0x0U; +} +static inline u32 gr_gpccs_falcon_addr_ext_s(void) +{ + return 12U; +} +static inline u32 gr_gpccs_falcon_addr_ext_f(u32 v) +{ + return (v & 0xfffU) << 0U; +} +static inline u32 gr_gpccs_falcon_addr_ext_m(void) +{ + return 0xfffU << 0U; +} +static inline u32 gr_gpccs_falcon_addr_ext_v(u32 r) +{ + return (r >> 0U) & 0xfffU; +} +static inline u32 gr_gpccs_cpuctl_r(void) +{ + return 0x0041a100U; +} +static inline u32 gr_gpccs_cpuctl_startcpu_f(u32 v) +{ + return (v & 0x1U) << 1U; +} +static inline u32 gr_gpccs_dmactl_r(void) +{ + return 0x0041a10cU; +} +static inline u32 gr_gpccs_dmactl_require_ctx_f(u32 v) +{ + return (v & 0x1U) << 0U; +} +static inline u32 gr_gpccs_dmactl_dmem_scrubbing_m(void) +{ + return 0x1U << 1U; +} +static inline u32 gr_gpccs_dmactl_imem_scrubbing_m(void) +{ + return 0x1U << 2U; +} +static inline u32 gr_gpccs_imemc_r(u32 i) +{ + return 0x0041a180U + i*16U; +} +static inline u32 gr_gpccs_imemc_offs_f(u32 v) +{ + return (v & 0x3fU) 
<< 2U; +} +static inline u32 gr_gpccs_imemc_blk_f(u32 v) +{ + return (v & 0xffU) << 8U; +} +static inline u32 gr_gpccs_imemc_aincw_f(u32 v) +{ + return (v & 0x1U) << 24U; +} +static inline u32 gr_gpccs_imemd_r(u32 i) +{ + return 0x0041a184U + i*16U; +} +static inline u32 gr_gpccs_imemt_r(u32 i) +{ + return 0x0041a188U + i*16U; +} +static inline u32 gr_gpccs_imemt__size_1_v(void) +{ + return 0x00000004U; +} +static inline u32 gr_gpccs_imemt_tag_f(u32 v) +{ + return (v & 0xffffU) << 0U; +} +static inline u32 gr_gpccs_dmemc_r(u32 i) +{ + return 0x0041a1c0U + i*8U; +} +static inline u32 gr_gpccs_dmemc_offs_f(u32 v) +{ + return (v & 0x3fU) << 2U; +} +static inline u32 gr_gpccs_dmemc_blk_f(u32 v) +{ + return (v & 0xffU) << 8U; +} +static inline u32 gr_gpccs_dmemc_aincw_f(u32 v) +{ + return (v & 0x1U) << 24U; +} +static inline u32 gr_gpccs_dmemd_r(u32 i) +{ + return 0x0041a1c4U + i*8U; +} +static inline u32 gr_gpccs_ctxsw_mailbox_r(u32 i) +{ + return 0x0041a800U + i*4U; +} +static inline u32 gr_gpccs_ctxsw_mailbox_value_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 gr_gpcs_swdx_bundle_cb_base_r(void) +{ + return 0x00418e24U; +} +static inline u32 gr_gpcs_swdx_bundle_cb_base_addr_39_8_s(void) +{ + return 32U; +} +static inline u32 gr_gpcs_swdx_bundle_cb_base_addr_39_8_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 gr_gpcs_swdx_bundle_cb_base_addr_39_8_m(void) +{ + return 0xffffffffU << 0U; +} +static inline u32 gr_gpcs_swdx_bundle_cb_base_addr_39_8_v(u32 r) +{ + return (r >> 0U) & 0xffffffffU; +} +static inline u32 gr_gpcs_swdx_bundle_cb_base_addr_39_8_init_v(void) +{ + return 0x00000000U; +} +static inline u32 gr_gpcs_swdx_bundle_cb_base_addr_39_8_init_f(void) +{ + return 0x0U; +} +static inline u32 gr_gpcs_swdx_bundle_cb_size_r(void) +{ + return 0x00418e28U; +} +static inline u32 gr_gpcs_swdx_bundle_cb_size_div_256b_s(void) +{ + return 11U; +} +static inline u32 gr_gpcs_swdx_bundle_cb_size_div_256b_f(u32 v) +{ + return (v & 
0x7ffU) << 0U; +} +static inline u32 gr_gpcs_swdx_bundle_cb_size_div_256b_m(void) +{ + return 0x7ffU << 0U; +} +static inline u32 gr_gpcs_swdx_bundle_cb_size_div_256b_v(u32 r) +{ + return (r >> 0U) & 0x7ffU; +} +static inline u32 gr_gpcs_swdx_bundle_cb_size_div_256b_init_v(void) +{ + return 0x00000030U; +} +static inline u32 gr_gpcs_swdx_bundle_cb_size_div_256b_init_f(void) +{ + return 0x30U; +} +static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_s(void) +{ + return 1U; +} +static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_f(u32 v) +{ + return (v & 0x1U) << 31U; +} +static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_m(void) +{ + return 0x1U << 31U; +} +static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_v(u32 r) +{ + return (r >> 31U) & 0x1U; +} +static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_false_v(void) +{ + return 0x00000000U; +} +static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_false_f(void) +{ + return 0x0U; +} +static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_true_v(void) +{ + return 0x00000001U; +} +static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_true_f(void) +{ + return 0x80000000U; +} +static inline u32 gr_gpc0_swdx_rm_spill_buffer_size_r(void) +{ + return 0x005001dcU; +} +static inline u32 gr_gpc0_swdx_rm_spill_buffer_size_256b_f(u32 v) +{ + return (v & 0xffffU) << 0U; +} +static inline u32 gr_gpc0_swdx_rm_spill_buffer_size_256b_default_v(void) +{ + return 0x000004b0U; +} +static inline u32 gr_gpc0_swdx_rm_spill_buffer_size_256b_byte_granularity_v(void) +{ + return 0x00000100U; +} +static inline u32 gr_gpc0_swdx_rm_spill_buffer_addr_r(void) +{ + return 0x005001d8U; +} +static inline u32 gr_gpc0_swdx_rm_spill_buffer_addr_39_8_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 gr_gpc0_swdx_rm_spill_buffer_addr_39_8_align_bits_v(void) +{ + return 0x00000008U; +} +static inline u32 gr_gpcs_swdx_beta_cb_ctrl_r(void) +{ + return 0x004181e4U; +} +static inline u32 gr_gpcs_swdx_beta_cb_ctrl_cbes_reserve_f(u32 v) +{ + return (v 
& 0xfffU) << 0U; +} +static inline u32 gr_gpcs_swdx_beta_cb_ctrl_cbes_reserve_gfxp_v(void) +{ + return 0x00000100U; +} +static inline u32 gr_gpcs_ppcs_cbm_beta_cb_ctrl_r(void) +{ + return 0x0041befcU; +} +static inline u32 gr_gpcs_ppcs_cbm_beta_cb_ctrl_cbes_reserve_f(u32 v) +{ + return (v & 0xfffU) << 0U; +} +static inline u32 gr_gpcs_swdx_tc_beta_cb_size_r(u32 i) +{ + return 0x00418ea0U + i*4U; +} +static inline u32 gr_gpcs_swdx_tc_beta_cb_size_v_f(u32 v) +{ + return (v & 0x3fffffU) << 0U; +} +static inline u32 gr_gpcs_swdx_tc_beta_cb_size_v_m(void) +{ + return 0x3fffffU << 0U; +} +static inline u32 gr_gpcs_swdx_dss_zbc_color_r_r(u32 i) +{ + return 0x00418010U + i*4U; +} +static inline u32 gr_gpcs_swdx_dss_zbc_color_r_val_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 gr_gpcs_swdx_dss_zbc_color_g_r(u32 i) +{ + return 0x0041804cU + i*4U; +} +static inline u32 gr_gpcs_swdx_dss_zbc_color_g_val_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 gr_gpcs_swdx_dss_zbc_color_b_r(u32 i) +{ + return 0x00418088U + i*4U; +} +static inline u32 gr_gpcs_swdx_dss_zbc_color_b_val_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 gr_gpcs_swdx_dss_zbc_color_a_r(u32 i) +{ + return 0x004180c4U + i*4U; +} +static inline u32 gr_gpcs_swdx_dss_zbc_color_a_val_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 gr_gpcs_swdx_dss_zbc_c_01_to_04_format_r(void) +{ + return 0x00418100U; +} +static inline u32 gr_gpcs_swdx_dss_zbc_z_r(u32 i) +{ + return 0x00418110U + i*4U; +} +static inline u32 gr_gpcs_swdx_dss_zbc_z_val_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 gr_gpcs_swdx_dss_zbc_z_01_to_04_format_r(void) +{ + return 0x0041814cU; +} +static inline u32 gr_gpcs_swdx_dss_zbc_s_r(u32 i) +{ + return 0x0041815cU + i*4U; +} +static inline u32 gr_gpcs_swdx_dss_zbc_s_val_f(u32 v) +{ + return (v & 0xffU) << 0U; +} +static inline u32 gr_gpcs_swdx_dss_zbc_s_01_to_04_format_r(void) +{ + return 0x00418198U; +} 
+static inline u32 gr_gpcs_setup_attrib_cb_base_r(void) +{ + return 0x00418810U; +} +static inline u32 gr_gpcs_setup_attrib_cb_base_addr_39_12_f(u32 v) +{ + return (v & 0xfffffffU) << 0U; +} +static inline u32 gr_gpcs_setup_attrib_cb_base_addr_39_12_align_bits_v(void) +{ + return 0x0000000cU; +} +static inline u32 gr_gpcs_setup_attrib_cb_base_valid_true_f(void) +{ + return 0x80000000U; +} +static inline u32 gr_crstr_gpc_map_r(u32 i) +{ + return 0x00418b08U + i*4U; +} +static inline u32 gr_crstr_gpc_map_tile0_f(u32 v) +{ + return (v & 0x1fU) << 0U; +} +static inline u32 gr_crstr_gpc_map_tile1_f(u32 v) +{ + return (v & 0x1fU) << 5U; +} +static inline u32 gr_crstr_gpc_map_tile2_f(u32 v) +{ + return (v & 0x1fU) << 10U; +} +static inline u32 gr_crstr_gpc_map_tile3_f(u32 v) +{ + return (v & 0x1fU) << 15U; +} +static inline u32 gr_crstr_gpc_map_tile4_f(u32 v) +{ + return (v & 0x1fU) << 20U; +} +static inline u32 gr_crstr_gpc_map_tile5_f(u32 v) +{ + return (v & 0x1fU) << 25U; +} +static inline u32 gr_crstr_map_table_cfg_r(void) +{ + return 0x00418bb8U; +} +static inline u32 gr_crstr_map_table_cfg_row_offset_f(u32 v) +{ + return (v & 0xffU) << 0U; +} +static inline u32 gr_crstr_map_table_cfg_num_entries_f(u32 v) +{ + return (v & 0xffU) << 8U; +} +static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map_r(u32 i) +{ + return 0x00418980U + i*4U; +} +static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map_tile_0_f(u32 v) +{ + return (v & 0x7U) << 0U; +} +static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map_tile_1_f(u32 v) +{ + return (v & 0x7U) << 4U; +} +static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map_tile_2_f(u32 v) +{ + return (v & 0x7U) << 8U; +} +static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map_tile_3_f(u32 v) +{ + return (v & 0x7U) << 12U; +} +static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map_tile_4_f(u32 v) +{ + return (v & 0x7U) << 16U; +} +static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map_tile_5_f(u32 v) +{ + return (v & 0x7U) << 20U; +} +static inline u32 
gr_gpcs_zcull_sm_in_gpc_number_map_tile_6_f(u32 v) +{ + return (v & 0x7U) << 24U; +} +static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map_tile_7_f(u32 v) +{ + return (v & 0x7U) << 28U; +} +static inline u32 gr_gpcs_gpm_pd_cfg_r(void) +{ + return 0x00418c6cU; +} +static inline u32 gr_gpcs_gcc_pagepool_base_r(void) +{ + return 0x00419004U; +} +static inline u32 gr_gpcs_gcc_pagepool_base_addr_39_8_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 gr_gpcs_gcc_pagepool_r(void) +{ + return 0x00419008U; +} +static inline u32 gr_gpcs_gcc_pagepool_total_pages_f(u32 v) +{ + return (v & 0x3ffU) << 0U; +} +static inline u32 gr_gpcs_tpcs_pe_vaf_r(void) +{ + return 0x0041980cU; +} +static inline u32 gr_gpcs_tpcs_pe_vaf_fast_mode_switch_true_f(void) +{ + return 0x10U; +} +static inline u32 gr_gpcs_tpcs_pe_pin_cb_global_base_addr_r(void) +{ + return 0x00419848U; +} +static inline u32 gr_gpcs_tpcs_pe_pin_cb_global_base_addr_v_f(u32 v) +{ + return (v & 0xfffffffU) << 0U; +} +static inline u32 gr_gpcs_tpcs_pe_pin_cb_global_base_addr_valid_f(u32 v) +{ + return (v & 0x1U) << 28U; +} +static inline u32 gr_gpcs_tpcs_pe_pin_cb_global_base_addr_valid_true_f(void) +{ + return 0x10000000U; +} +static inline u32 gr_gpcs_tpcs_mpc_vtg_debug_r(void) +{ + return 0x00419c00U; +} +static inline u32 gr_gpcs_tpcs_mpc_vtg_debug_timeslice_mode_disabled_f(void) +{ + return 0x0U; +} +static inline u32 gr_gpcs_tpcs_mpc_vtg_debug_timeslice_mode_enabled_f(void) +{ + return 0x8U; +} +static inline u32 gr_gpcs_tpcs_mpc_vtg_cb_global_base_addr_r(void) +{ + return 0x00419c2cU; +} +static inline u32 gr_gpcs_tpcs_mpc_vtg_cb_global_base_addr_v_f(u32 v) +{ + return (v & 0xfffffffU) << 0U; +} +static inline u32 gr_gpcs_tpcs_mpc_vtg_cb_global_base_addr_valid_f(u32 v) +{ + return (v & 0x1U) << 28U; +} +static inline u32 gr_gpcs_tpcs_mpc_vtg_cb_global_base_addr_valid_true_f(void) +{ + return 0x10000000U; +} +static inline u32 gr_gpcs_tpcs_sms_hww_warp_esr_report_mask_r(void) +{ + return 0x00419ea8U; 
+} +static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_r(void) +{ + return 0x00504728U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_stack_error_report_f(void) +{ + return 0x2U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_api_stack_error_report_f(void) +{ + return 0x4U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_pc_wrap_report_f(void) +{ + return 0x10U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_misaligned_pc_report_f(void) +{ + return 0x20U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_pc_overflow_report_f(void) +{ + return 0x40U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_misaligned_reg_report_f(void) +{ + return 0x100U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_illegal_instr_encoding_report_f(void) +{ + return 0x200U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_illegal_instr_param_report_f(void) +{ + return 0x800U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_oor_reg_report_f(void) +{ + return 0x2000U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_oor_addr_report_f(void) +{ + return 0x4000U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_misaligned_addr_report_f(void) +{ + return 0x8000U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_invalid_addr_space_report_f(void) +{ + return 0x10000U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_invalid_const_addr_ldc_report_f(void) +{ + return 0x40000U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_mmu_fault_report_f(void) +{ + return 0x800000U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_stack_overflow_report_f(void) +{ + return 0x400000U; +} +static inline u32 gr_gpcs_tpcs_tpccs_tpc_exception_en_r(void) +{ + return 0x00419d0cU; +} +static inline u32 gr_gpcs_tpcs_tpccs_tpc_exception_en_sm_enabled_f(void) +{ + return 0x2U; +} 
+static inline u32 gr_gpcs_tpcs_tpccs_tpc_exception_en_tex_enabled_f(void) +{ + return 0x1U; +} +static inline u32 gr_gpcs_tpcs_tpccs_tpc_exception_en_mpc_enabled_f(void) +{ + return 0x10U; +} +static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_en_r(void) +{ + return 0x0050450cU; +} +static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_en_sm_v(u32 r) +{ + return (r >> 1U) & 0x1U; +} +static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_en_sm_enabled_f(void) +{ + return 0x2U; +} +static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_en_mpc_enabled_f(void) +{ + return 0x10U; +} +static inline u32 gr_gpcs_gpccs_gpc_exception_en_r(void) +{ + return 0x0041ac94U; +} +static inline u32 gr_gpcs_gpccs_gpc_exception_en_gcc_f(u32 v) +{ + return (v & 0x1U) << 2U; +} +static inline u32 gr_gpcs_gpccs_gpc_exception_en_tpc_f(u32 v) +{ + return (v & 0xffU) << 16U; +} +static inline u32 gr_gpc0_gpccs_gpc_exception_r(void) +{ + return 0x00502c90U; +} +static inline u32 gr_gpc0_gpccs_gpc_exception_gcc_v(u32 r) +{ + return (r >> 2U) & 0x1U; +} +static inline u32 gr_gpc0_gpccs_gpc_exception_tpc_v(u32 r) +{ + return (r >> 16U) & 0xffU; +} +static inline u32 gr_gpc0_gpccs_gpc_exception_tpc_0_pending_v(void) +{ + return 0x00000001U; +} +static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_r(void) +{ + return 0x00504508U; +} +static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_tex_v(u32 r) +{ + return (r >> 0U) & 0x1U; +} +static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_tex_pending_v(void) +{ + return 0x00000001U; +} +static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_sm_v(u32 r) +{ + return (r >> 1U) & 0x1U; +} +static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_sm_pending_v(void) +{ + return 0x00000001U; +} +static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_mpc_m(void) +{ + return 0x1U << 4U; +} +static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_mpc_pending_f(void) +{ + return 0x10U; +} +static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_r(void) +{ + return 0x00504704U; +} +static inline u32 
gr_gpc0_tpc0_sm0_dbgr_control0_debugger_mode_m(void) +{ + return 0x1U << 0U; +} +static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_debugger_mode_v(u32 r) +{ + return (r >> 0U) & 0x1U; +} +static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_debugger_mode_on_v(void) +{ + return 0x00000001U; +} +static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_debugger_mode_on_f(void) +{ + return 0x1U; +} +static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_debugger_mode_off_v(void) +{ + return 0x00000000U; +} +static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_debugger_mode_off_f(void) +{ + return 0x0U; +} +static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_stop_trigger_m(void) +{ + return 0x1U << 31U; +} +static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_stop_trigger_enable_f(void) +{ + return 0x80000000U; +} +static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_stop_trigger_disable_f(void) +{ + return 0x0U; +} +static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_single_step_mode_m(void) +{ + return 0x1U << 3U; +} +static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_single_step_mode_enable_f(void) +{ + return 0x8U; +} +static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_single_step_mode_disable_f(void) +{ + return 0x0U; +} +static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_run_trigger_task_f(void) +{ + return 0x40000000U; +} +static inline u32 gr_gpc0_tpc0_sm0_warp_valid_mask_0_r(void) +{ + return 0x00504708U; +} +static inline u32 gr_gpc0_tpc0_sm0_warp_valid_mask_1_r(void) +{ + return 0x0050470cU; +} +static inline u32 gr_gpc0_tpc0_sm0_dbgr_bpt_pause_mask_0_r(void) +{ + return 0x00504710U; +} +static inline u32 gr_gpc0_tpc0_sm0_dbgr_bpt_pause_mask_1_r(void) +{ + return 0x00504714U; +} +static inline u32 gr_gpc0_tpc0_sm0_dbgr_bpt_trap_mask_0_r(void) +{ + return 0x00504718U; +} +static inline u32 gr_gpc0_tpc0_sm0_dbgr_bpt_trap_mask_1_r(void) +{ + return 0x0050471cU; +} +static inline u32 gr_gpcs_tpcs_sms_dbgr_bpt_pause_mask_0_r(void) +{ + return 0x00419e90U; +} +static inline u32 
gr_gpcs_tpcs_sms_dbgr_bpt_pause_mask_1_r(void) +{ + return 0x00419e94U; +} +static inline u32 gr_gpcs_tpcs_sms_dbgr_status0_r(void) +{ + return 0x00419e80U; +} +static inline u32 gr_gpc0_tpc0_sm0_dbgr_status0_r(void) +{ + return 0x00504700U; +} +static inline u32 gr_gpc0_tpc0_sm0_dbgr_status0_sm_in_trap_mode_v(u32 r) +{ + return (r >> 0U) & 0x1U; +} +static inline u32 gr_gpc0_tpc0_sm0_dbgr_status0_locked_down_v(u32 r) +{ + return (r >> 4U) & 0x1U; +} +static inline u32 gr_gpc0_tpc0_sm0_dbgr_status0_locked_down_true_v(void) +{ + return 0x00000001U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_r(void) +{ + return 0x00504730U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_error_v(u32 r) +{ + return (r >> 0U) & 0xffffU; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_error_none_v(void) +{ + return 0x00000000U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_error_none_f(void) +{ + return 0x0U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_wrap_id_m(void) +{ + return 0xffU << 16U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_addr_error_type_m(void) +{ + return 0xfU << 24U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_addr_error_type_none_f(void) +{ + return 0x0U; +} +static inline u32 gr_gpc0_tpc0_sm_tpc_esr_sm_sel_r(void) +{ + return 0x0050460cU; +} +static inline u32 gr_gpc0_tpc0_sm_tpc_esr_sm_sel_sm0_error_v(u32 r) +{ + return (r >> 0U) & 0x1U; +} +static inline u32 gr_gpc0_tpc0_sm_tpc_esr_sm_sel_sm1_error_v(u32 r) +{ + return (r >> 1U) & 0x1U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_pc_r(void) +{ + return 0x00504738U; +} +static inline u32 gr_gpc0_tpc0_sm_halfctl_ctrl_r(void) +{ + return 0x005043a0U; +} +static inline u32 gr_gpcs_tpcs_sm_halfctl_ctrl_r(void) +{ + return 0x00419ba0U; +} +static inline u32 gr_gpcs_tpcs_sm_halfctl_ctrl_sctl_read_quad_ctl_m(void) +{ + return 0x1U << 4U; +} +static inline u32 gr_gpcs_tpcs_sm_halfctl_ctrl_sctl_read_quad_ctl_f(u32 v) +{ + return (v & 0x1U) << 4U; +} +static inline u32 
gr_gpc0_tpc0_sm_debug_sfe_control_r(void) +{ + return 0x005043b0U; +} +static inline u32 gr_gpcs_tpcs_sm_debug_sfe_control_r(void) +{ + return 0x00419bb0U; +} +static inline u32 gr_gpcs_tpcs_sm_debug_sfe_control_read_half_ctl_m(void) +{ + return 0x1U << 0U; +} +static inline u32 gr_gpcs_tpcs_sm_debug_sfe_control_read_half_ctl_f(u32 v) +{ + return (v & 0x1U) << 0U; +} +static inline u32 gr_gpcs_tpcs_pes_vsc_vpc_r(void) +{ + return 0x0041be08U; +} +static inline u32 gr_gpcs_tpcs_pes_vsc_vpc_fast_mode_switch_true_f(void) +{ + return 0x4U; +} +static inline u32 gr_ppcs_wwdx_map_gpc_map_r(u32 i) +{ + return 0x0041bf00U + i*4U; +} +static inline u32 gr_ppcs_wwdx_map_table_cfg_r(void) +{ + return 0x0041bfd0U; +} +static inline u32 gr_ppcs_wwdx_map_table_cfg_row_offset_f(u32 v) +{ + return (v & 0xffU) << 0U; +} +static inline u32 gr_ppcs_wwdx_map_table_cfg_num_entries_f(u32 v) +{ + return (v & 0xffU) << 8U; +} +static inline u32 gr_ppcs_wwdx_map_table_cfg_normalized_num_entries_f(u32 v) +{ + return (v & 0x1fU) << 16U; +} +static inline u32 gr_ppcs_wwdx_map_table_cfg_normalized_shift_value_f(u32 v) +{ + return (v & 0x7U) << 21U; +} +static inline u32 gr_gpcs_ppcs_wwdx_sm_num_rcp_r(void) +{ + return 0x0041bfd4U; +} +static inline u32 gr_gpcs_ppcs_wwdx_sm_num_rcp_conservative_f(u32 v) +{ + return (v & 0xffffffU) << 0U; +} +static inline u32 gr_ppcs_wwdx_map_table_cfg_coeff_r(u32 i) +{ + return 0x0041bfb0U + i*4U; +} +static inline u32 gr_ppcs_wwdx_map_table_cfg_coeff__size_1_v(void) +{ + return 0x00000005U; +} +static inline u32 gr_ppcs_wwdx_map_table_cfg_coeff_0_mod_value_f(u32 v) +{ + return (v & 0xffU) << 0U; +} +static inline u32 gr_ppcs_wwdx_map_table_cfg_coeff_1_mod_value_f(u32 v) +{ + return (v & 0xffU) << 8U; +} +static inline u32 gr_ppcs_wwdx_map_table_cfg_coeff_2_mod_value_f(u32 v) +{ + return (v & 0xffU) << 16U; +} +static inline u32 gr_ppcs_wwdx_map_table_cfg_coeff_3_mod_value_f(u32 v) +{ + return (v & 0xffU) << 24U; +} +static inline u32 
gr_bes_zrop_settings_r(void) +{ + return 0x00408850U; +} +static inline u32 gr_bes_zrop_settings_num_active_ltcs_f(u32 v) +{ + return (v & 0xfU) << 0U; +} +static inline u32 gr_be0_crop_debug3_r(void) +{ + return 0x00410108U; +} +static inline u32 gr_bes_crop_debug3_r(void) +{ + return 0x00408908U; +} +static inline u32 gr_bes_crop_debug3_comp_vdc_4to2_disable_m(void) +{ + return 0x1U << 31U; +} +static inline u32 gr_bes_crop_debug3_blendopt_read_suppress_m(void) +{ + return 0x1U << 1U; +} +static inline u32 gr_bes_crop_debug3_blendopt_read_suppress_disabled_f(void) +{ + return 0x0U; +} +static inline u32 gr_bes_crop_debug3_blendopt_read_suppress_enabled_f(void) +{ + return 0x2U; +} +static inline u32 gr_bes_crop_debug3_blendopt_fill_override_m(void) +{ + return 0x1U << 2U; +} +static inline u32 gr_bes_crop_debug3_blendopt_fill_override_disabled_f(void) +{ + return 0x0U; +} +static inline u32 gr_bes_crop_debug3_blendopt_fill_override_enabled_f(void) +{ + return 0x4U; +} +static inline u32 gr_bes_crop_settings_r(void) +{ + return 0x00408958U; +} +static inline u32 gr_bes_crop_settings_num_active_ltcs_f(u32 v) +{ + return (v & 0xfU) << 0U; +} +static inline u32 gr_zcull_bytes_per_aliquot_per_gpu_v(void) +{ + return 0x00000020U; +} +static inline u32 gr_zcull_save_restore_header_bytes_per_gpc_v(void) +{ + return 0x00000020U; +} +static inline u32 gr_zcull_save_restore_subregion_header_bytes_per_gpc_v(void) +{ + return 0x000000c0U; +} +static inline u32 gr_zcull_subregion_qty_v(void) +{ + return 0x00000010U; +} +static inline u32 gr_gpcs_tpcs_tex_in_dbg_r(void) +{ + return 0x00419a00U; +} +static inline u32 gr_gpcs_tpcs_tex_in_dbg_tsl1_rvch_invalidate_f(u32 v) +{ + return (v & 0x1U) << 19U; +} +static inline u32 gr_gpcs_tpcs_tex_in_dbg_tsl1_rvch_invalidate_m(void) +{ + return 0x1U << 19U; +} +static inline u32 gr_gpcs_tpcs_sm_l1tag_ctrl_r(void) +{ + return 0x00419bf0U; +} +static inline u32 gr_gpcs_tpcs_sm_l1tag_ctrl_cache_surface_ld_f(u32 v) +{ + return (v & 0x1U) << 
5U; +} +static inline u32 gr_gpcs_tpcs_sm_l1tag_ctrl_cache_surface_ld_m(void) +{ + return 0x1U << 5U; +} +static inline u32 gr_gpcs_tpcs_sm_l1tag_ctrl_cache_surface_st_f(u32 v) +{ + return (v & 0x1U) << 10U; +} +static inline u32 gr_gpcs_tpcs_sm_l1tag_ctrl_cache_surface_st_m(void) +{ + return 0x1U << 10U; +} +static inline u32 gr_fe_pwr_mode_r(void) +{ + return 0x00404170U; +} +static inline u32 gr_fe_pwr_mode_mode_auto_f(void) +{ + return 0x0U; +} +static inline u32 gr_fe_pwr_mode_mode_force_on_f(void) +{ + return 0x2U; +} +static inline u32 gr_fe_pwr_mode_req_v(u32 r) +{ + return (r >> 4U) & 0x1U; +} +static inline u32 gr_fe_pwr_mode_req_send_f(void) +{ + return 0x10U; +} +static inline u32 gr_fe_pwr_mode_req_done_v(void) +{ + return 0x00000000U; +} +static inline u32 gr_gpcs_pri_mmu_ctrl_r(void) +{ + return 0x00418880U; +} +static inline u32 gr_gpcs_pri_mmu_ctrl_vm_pg_size_m(void) +{ + return 0x1U << 0U; +} +static inline u32 gr_gpcs_pri_mmu_ctrl_use_pdb_big_page_size_m(void) +{ + return 0x1U << 11U; +} +static inline u32 gr_gpcs_pri_mmu_ctrl_vol_fault_m(void) +{ + return 0x1U << 1U; +} +static inline u32 gr_gpcs_pri_mmu_ctrl_comp_fault_m(void) +{ + return 0x1U << 2U; +} +static inline u32 gr_gpcs_pri_mmu_ctrl_miss_gran_m(void) +{ + return 0x3U << 3U; +} +static inline u32 gr_gpcs_pri_mmu_ctrl_cache_mode_m(void) +{ + return 0x3U << 5U; +} +static inline u32 gr_gpcs_pri_mmu_ctrl_mmu_aperture_m(void) +{ + return 0x3U << 28U; +} +static inline u32 gr_gpcs_pri_mmu_ctrl_mmu_vol_m(void) +{ + return 0x1U << 30U; +} +static inline u32 gr_gpcs_pri_mmu_ctrl_mmu_disable_m(void) +{ + return 0x1U << 31U; +} +static inline u32 gr_gpcs_pri_mmu_pm_unit_mask_r(void) +{ + return 0x00418890U; +} +static inline u32 gr_gpcs_pri_mmu_pm_req_mask_r(void) +{ + return 0x00418894U; +} +static inline u32 gr_gpcs_pri_mmu_debug_ctrl_r(void) +{ + return 0x004188b0U; +} +static inline u32 gr_gpcs_pri_mmu_debug_ctrl_debug_v(u32 r) +{ + return (r >> 16U) & 0x1U; +} +static inline u32 
gr_gpcs_pri_mmu_debug_ctrl_debug_enabled_v(void) +{ + return 0x00000001U; +} +static inline u32 gr_gpcs_pri_mmu_debug_wr_r(void) +{ + return 0x004188b4U; +} +static inline u32 gr_gpcs_pri_mmu_debug_rd_r(void) +{ + return 0x004188b8U; +} +static inline u32 gr_gpcs_mmu_num_active_ltcs_r(void) +{ + return 0x004188acU; +} +static inline u32 gr_gpcs_tpcs_sms_dbgr_control0_r(void) +{ + return 0x00419e84U; +} +static inline u32 gr_fe_gfxp_wfi_timeout_r(void) +{ + return 0x004041c0U; +} +static inline u32 gr_fe_gfxp_wfi_timeout_count_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 gr_fe_gfxp_wfi_timeout_count_disabled_f(void) +{ + return 0x0U; +} +static inline u32 gr_gpcs_tpcs_sm_texio_control_r(void) +{ + return 0x00419bd8U; +} +static inline u32 gr_gpcs_tpcs_sm_texio_control_oor_addr_check_mode_f(u32 v) +{ + return (v & 0x7U) << 8U; +} +static inline u32 gr_gpcs_tpcs_sm_texio_control_oor_addr_check_mode_m(void) +{ + return 0x7U << 8U; +} +static inline u32 gr_gpcs_tpcs_sm_texio_control_oor_addr_check_mode_arm_63_48_match_f(void) +{ + return 0x100U; +} +static inline u32 gr_gpcs_tpcs_sm_disp_ctrl_r(void) +{ + return 0x00419ba4U; +} +static inline u32 gr_gpcs_tpcs_sm_disp_ctrl_re_suppress_m(void) +{ + return 0x3U << 11U; +} +static inline u32 gr_gpcs_tpcs_sm_disp_ctrl_re_suppress_disable_f(void) +{ + return 0x1000U; +} +static inline u32 gr_gpcs_tc_debug0_r(void) +{ + return 0x00418708U; +} +static inline u32 gr_gpcs_tc_debug0_limit_coalesce_buffer_size_f(u32 v) +{ + return (v & 0x1ffU) << 0U; +} +static inline u32 gr_gpcs_tc_debug0_limit_coalesce_buffer_size_m(void) +{ + return 0x1ffU << 0U; +} +#endif diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_ltc_gv100.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_ltc_gv100.h new file mode 100644 index 000000000..3543f0b75 --- /dev/null +++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_ltc_gv100.h @@ -0,0 +1,619 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/* + * Function naming determines intended use: + * + * _r(void) : Returns the offset for register . + * + * _o(void) : Returns the offset for element . + * + * _w(void) : Returns the word offset for word (4 byte) element . + * + * __s(void) : Returns size of field of register in bits. + * + * __f(u32 v) : Returns a value based on 'v' which has been shifted + * and masked to place it at field of register . This value + * can be |'d with others to produce a full register value for + * register . + * + * __m(void) : Returns a mask for field of register . This + * value can be ~'d and then &'d to clear the value of field for + * register . + * + * ___f(void) : Returns the constant value after being shifted + * to place it at field of register . This value can be |'d + * with others to produce a full register value for . 
+ * + * __v(u32 r) : Returns the value of field from a full register + * value 'r' after being shifted to place its LSB at bit 0. + * This value is suitable for direct comparison with other unshifted + * values appropriate for use in field of register . + * + * ___v(void) : Returns the constant value for defined for + * field of register . This value is suitable for direct + * comparison with unshifted values appropriate for use in field + * of register . + */ +#ifndef _hw_ltc_gv100_h_ +#define _hw_ltc_gv100_h_ + +static inline u32 ltc_pltcg_base_v(void) +{ + return 0x00140000U; +} +static inline u32 ltc_pltcg_extent_v(void) +{ + return 0x0017ffffU; +} +static inline u32 ltc_ltc0_ltss_v(void) +{ + return 0x00140200U; +} +static inline u32 ltc_ltc0_lts0_v(void) +{ + return 0x00140400U; +} +static inline u32 ltc_ltcs_ltss_v(void) +{ + return 0x0017e200U; +} +static inline u32 ltc_ltcs_lts0_cbc_ctrl1_r(void) +{ + return 0x0014046cU; +} +static inline u32 ltc_ltc0_lts0_dstg_cfg0_r(void) +{ + return 0x00140518U; +} +static inline u32 ltc_ltcs_ltss_dstg_cfg0_r(void) +{ + return 0x0017e318U; +} +static inline u32 ltc_ltcs_ltss_dstg_cfg0_vdc_4to2_disable_m(void) +{ + return 0x1U << 15U; +} +static inline u32 ltc_ltc0_lts0_tstg_cfg1_r(void) +{ + return 0x00140494U; +} +static inline u32 ltc_ltc0_lts0_tstg_cfg1_active_ways_v(u32 r) +{ + return (r >> 0U) & 0xffffU; +} +static inline u32 ltc_ltc0_lts0_tstg_cfg1_active_sets_v(u32 r) +{ + return (r >> 16U) & 0x3U; +} +static inline u32 ltc_ltc0_lts0_tstg_cfg1_active_sets_all_v(void) +{ + return 0x00000000U; +} +static inline u32 ltc_ltc0_lts0_tstg_cfg1_active_sets_half_v(void) +{ + return 0x00000001U; +} +static inline u32 ltc_ltc0_lts0_tstg_cfg1_active_sets_quarter_v(void) +{ + return 0x00000002U; +} +static inline u32 ltc_ltcs_ltss_cbc_ctrl1_r(void) +{ + return 0x0017e26cU; +} +static inline u32 ltc_ltcs_ltss_cbc_ctrl1_clean_active_f(void) +{ + return 0x1U; +} +static inline u32 
ltc_ltcs_ltss_cbc_ctrl1_invalidate_active_f(void) +{ + return 0x2U; +} +static inline u32 ltc_ltcs_ltss_cbc_ctrl1_clear_v(u32 r) +{ + return (r >> 2U) & 0x1U; +} +static inline u32 ltc_ltcs_ltss_cbc_ctrl1_clear_active_v(void) +{ + return 0x00000001U; +} +static inline u32 ltc_ltcs_ltss_cbc_ctrl1_clear_active_f(void) +{ + return 0x4U; +} +static inline u32 ltc_ltc0_lts0_cbc_ctrl1_r(void) +{ + return 0x0014046cU; +} +static inline u32 ltc_ltcs_ltss_cbc_ctrl2_r(void) +{ + return 0x0017e270U; +} +static inline u32 ltc_ltcs_ltss_cbc_ctrl2_clear_lower_bound_f(u32 v) +{ + return (v & 0x3ffffU) << 0U; +} +static inline u32 ltc_ltcs_ltss_cbc_ctrl3_r(void) +{ + return 0x0017e274U; +} +static inline u32 ltc_ltcs_ltss_cbc_ctrl3_clear_upper_bound_f(u32 v) +{ + return (v & 0x3ffffU) << 0U; +} +static inline u32 ltc_ltcs_ltss_cbc_ctrl3_clear_upper_bound_init_v(void) +{ + return 0x0003ffffU; +} +static inline u32 ltc_ltcs_ltss_cbc_base_r(void) +{ + return 0x0017e278U; +} +static inline u32 ltc_ltcs_ltss_cbc_base_alignment_shift_v(void) +{ + return 0x0000000bU; +} +static inline u32 ltc_ltcs_ltss_cbc_base_address_v(u32 r) +{ + return (r >> 0U) & 0x3ffffffU; +} +static inline u32 ltc_ltcs_ltss_cbc_num_active_ltcs_r(void) +{ + return 0x0017e27cU; +} +static inline u32 ltc_ltcs_ltss_cbc_num_active_ltcs__v(u32 r) +{ + return (r >> 0U) & 0x1fU; +} +static inline u32 ltc_ltcs_ltss_cbc_num_active_ltcs_nvlink_peer_through_l2_f(u32 v) +{ + return (v & 0x1U) << 24U; +} +static inline u32 ltc_ltcs_ltss_cbc_num_active_ltcs_nvlink_peer_through_l2_v(u32 r) +{ + return (r >> 24U) & 0x1U; +} +static inline u32 ltc_ltcs_ltss_cbc_num_active_ltcs_serialize_f(u32 v) +{ + return (v & 0x1U) << 25U; +} +static inline u32 ltc_ltcs_ltss_cbc_num_active_ltcs_serialize_v(u32 r) +{ + return (r >> 25U) & 0x1U; +} +static inline u32 ltc_ltcs_misc_ltc_num_active_ltcs_r(void) +{ + return 0x0017e000U; +} +static inline u32 ltc_ltcs_ltss_cbc_param_r(void) +{ + return 0x0017e280U; +} +static inline u32 
ltc_ltcs_ltss_cbc_param_comptags_per_cache_line_v(u32 r) +{ + return (r >> 0U) & 0xffffU; +} +static inline u32 ltc_ltcs_ltss_cbc_param_cache_line_size_v(u32 r) +{ + return (r >> 24U) & 0xfU; +} +static inline u32 ltc_ltcs_ltss_cbc_param_slices_per_ltc_v(u32 r) +{ + return (r >> 28U) & 0xfU; +} +static inline u32 ltc_ltcs_ltss_cbc_param2_r(void) +{ + return 0x0017e3f4U; +} +static inline u32 ltc_ltcs_ltss_cbc_param2_gobs_per_comptagline_per_slice_v(u32 r) +{ + return (r >> 0U) & 0xffffU; +} +static inline u32 ltc_ltcs_ltss_tstg_set_mgmt_r(void) +{ + return 0x0017e2acU; +} +static inline u32 ltc_ltcs_ltss_tstg_set_mgmt_max_ways_evict_last_f(u32 v) +{ + return (v & 0x1fU) << 16U; +} +static inline u32 ltc_ltcs_ltss_dstg_zbc_index_r(void) +{ + return 0x0017e338U; +} +static inline u32 ltc_ltcs_ltss_dstg_zbc_index_address_f(u32 v) +{ + return (v & 0xfU) << 0U; +} +static inline u32 ltc_ltcs_ltss_dstg_zbc_color_clear_value_r(u32 i) +{ + return 0x0017e33cU + i*4U; +} +static inline u32 ltc_ltcs_ltss_dstg_zbc_color_clear_value__size_1_v(void) +{ + return 0x00000004U; +} +static inline u32 ltc_ltcs_ltss_dstg_zbc_depth_clear_value_r(void) +{ + return 0x0017e34cU; +} +static inline u32 ltc_ltcs_ltss_dstg_zbc_depth_clear_value_field_s(void) +{ + return 32U; +} +static inline u32 ltc_ltcs_ltss_dstg_zbc_depth_clear_value_field_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 ltc_ltcs_ltss_dstg_zbc_depth_clear_value_field_m(void) +{ + return 0xffffffffU << 0U; +} +static inline u32 ltc_ltcs_ltss_dstg_zbc_depth_clear_value_field_v(u32 r) +{ + return (r >> 0U) & 0xffffffffU; +} +static inline u32 ltc_ltcs_ltss_dstg_zbc_stencil_clear_value_r(void) +{ + return 0x0017e204U; +} +static inline u32 ltc_ltcs_ltss_dstg_zbc_stencil_clear_value_field_s(void) +{ + return 8U; +} +static inline u32 ltc_ltcs_ltss_dstg_zbc_stencil_clear_value_field_f(u32 v) +{ + return (v & 0xffU) << 0U; +} +static inline u32 ltc_ltcs_ltss_dstg_zbc_stencil_clear_value_field_m(void) +{ + return 
0xffU << 0U; +} +static inline u32 ltc_ltcs_ltss_dstg_zbc_stencil_clear_value_field_v(u32 r) +{ + return (r >> 0U) & 0xffU; +} +static inline u32 ltc_ltcs_ltss_tstg_set_mgmt_2_r(void) +{ + return 0x0017e2b0U; +} +static inline u32 ltc_ltcs_ltss_tstg_set_mgmt_2_l2_bypass_mode_enabled_f(void) +{ + return 0x10000000U; +} +static inline u32 ltc_ltcs_ltss_g_elpg_r(void) +{ + return 0x0017e214U; +} +static inline u32 ltc_ltcs_ltss_g_elpg_flush_v(u32 r) +{ + return (r >> 0U) & 0x1U; +} +static inline u32 ltc_ltcs_ltss_g_elpg_flush_pending_v(void) +{ + return 0x00000001U; +} +static inline u32 ltc_ltcs_ltss_g_elpg_flush_pending_f(void) +{ + return 0x1U; +} +static inline u32 ltc_ltc0_ltss_g_elpg_r(void) +{ + return 0x00140214U; +} +static inline u32 ltc_ltc0_ltss_g_elpg_flush_v(u32 r) +{ + return (r >> 0U) & 0x1U; +} +static inline u32 ltc_ltc0_ltss_g_elpg_flush_pending_v(void) +{ + return 0x00000001U; +} +static inline u32 ltc_ltc0_ltss_g_elpg_flush_pending_f(void) +{ + return 0x1U; +} +static inline u32 ltc_ltc1_ltss_g_elpg_r(void) +{ + return 0x00142214U; +} +static inline u32 ltc_ltc1_ltss_g_elpg_flush_v(u32 r) +{ + return (r >> 0U) & 0x1U; +} +static inline u32 ltc_ltc1_ltss_g_elpg_flush_pending_v(void) +{ + return 0x00000001U; +} +static inline u32 ltc_ltc1_ltss_g_elpg_flush_pending_f(void) +{ + return 0x1U; +} +static inline u32 ltc_ltcs_ltss_intr_r(void) +{ + return 0x0017e20cU; +} +static inline u32 ltc_ltcs_ltss_intr_ecc_sec_error_pending_f(void) +{ + return 0x100U; +} +static inline u32 ltc_ltcs_ltss_intr_ecc_ded_error_pending_f(void) +{ + return 0x200U; +} +static inline u32 ltc_ltcs_ltss_intr_en_evicted_cb_m(void) +{ + return 0x1U << 20U; +} +static inline u32 ltc_ltcs_ltss_intr_en_illegal_compstat_access_m(void) +{ + return 0x1U << 30U; +} +static inline u32 ltc_ltcs_ltss_intr_en_ecc_sec_error_enabled_f(void) +{ + return 0x1000000U; +} +static inline u32 ltc_ltcs_ltss_intr_en_ecc_ded_error_enabled_f(void) +{ + return 0x2000000U; +} +static inline u32 
ltc_ltc0_lts0_intr_r(void) +{ + return 0x0014040cU; +} +static inline u32 ltc_ltc0_lts0_dstg_ecc_report_r(void) +{ + return 0x0014051cU; +} +static inline u32 ltc_ltc0_lts0_dstg_ecc_report_sec_count_m(void) +{ + return 0xffU << 0U; +} +static inline u32 ltc_ltc0_lts0_dstg_ecc_report_sec_count_v(u32 r) +{ + return (r >> 0U) & 0xffU; +} +static inline u32 ltc_ltc0_lts0_dstg_ecc_report_ded_count_m(void) +{ + return 0xffU << 16U; +} +static inline u32 ltc_ltc0_lts0_dstg_ecc_report_ded_count_v(u32 r) +{ + return (r >> 16U) & 0xffU; +} +static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_r(void) +{ + return 0x0017e2a0U; +} +static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_v(u32 r) +{ + return (r >> 0U) & 0x1U; +} +static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_pending_v(void) +{ + return 0x00000001U; +} +static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_pending_f(void) +{ + return 0x1U; +} +static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_max_cycles_between_invalidates_v(u32 r) +{ + return (r >> 8U) & 0xfU; +} +static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_max_cycles_between_invalidates_3_v(void) +{ + return 0x00000003U; +} +static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_max_cycles_between_invalidates_3_f(void) +{ + return 0x300U; +} +static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_last_class_v(u32 r) +{ + return (r >> 28U) & 0x1U; +} +static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_last_class_true_v(void) +{ + return 0x00000001U; +} +static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_last_class_true_f(void) +{ + return 0x10000000U; +} +static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_normal_class_v(u32 r) +{ + return (r >> 29U) & 0x1U; +} +static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_normal_class_true_v(void) +{ + return 0x00000001U; +} +static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_normal_class_true_f(void) +{ + return 0x20000000U; +} +static inline u32 
ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_first_class_v(u32 r) +{ + return (r >> 30U) & 0x1U; +} +static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_first_class_true_v(void) +{ + return 0x00000001U; +} +static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_first_class_true_f(void) +{ + return 0x40000000U; +} +static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_r(void) +{ + return 0x0017e2a4U; +} +static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_v(u32 r) +{ + return (r >> 0U) & 0x1U; +} +static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_pending_v(void) +{ + return 0x00000001U; +} +static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_pending_f(void) +{ + return 0x1U; +} +static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_max_cycles_between_cleans_v(u32 r) +{ + return (r >> 8U) & 0xfU; +} +static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_max_cycles_between_cleans_3_v(void) +{ + return 0x00000003U; +} +static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_max_cycles_between_cleans_3_f(void) +{ + return 0x300U; +} +static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_wait_for_fb_to_pull_v(u32 r) +{ + return (r >> 16U) & 0x1U; +} +static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_wait_for_fb_to_pull_true_v(void) +{ + return 0x00000001U; +} +static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_wait_for_fb_to_pull_true_f(void) +{ + return 0x10000U; +} +static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_last_class_v(u32 r) +{ + return (r >> 28U) & 0x1U; +} +static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_last_class_true_v(void) +{ + return 0x00000001U; +} +static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_last_class_true_f(void) +{ + return 0x10000000U; +} +static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_normal_class_v(u32 r) +{ + return (r >> 29U) & 0x1U; +} +static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_normal_class_true_v(void) +{ + return 0x00000001U; +} +static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_normal_class_true_f(void) +{ + return 
0x20000000U; +} +static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_first_class_v(u32 r) +{ + return (r >> 30U) & 0x1U; +} +static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_first_class_true_v(void) +{ + return 0x00000001U; +} +static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_first_class_true_f(void) +{ + return 0x40000000U; +} +static inline u32 ltc_ltc0_ltss_tstg_cmgmt0_r(void) +{ + return 0x001402a0U; +} +static inline u32 ltc_ltc0_ltss_tstg_cmgmt0_invalidate_v(u32 r) +{ + return (r >> 0U) & 0x1U; +} +static inline u32 ltc_ltc0_ltss_tstg_cmgmt0_invalidate_pending_v(void) +{ + return 0x00000001U; +} +static inline u32 ltc_ltc0_ltss_tstg_cmgmt0_invalidate_pending_f(void) +{ + return 0x1U; +} +static inline u32 ltc_ltc0_ltss_tstg_cmgmt1_r(void) +{ + return 0x001402a4U; +} +static inline u32 ltc_ltc0_ltss_tstg_cmgmt1_clean_v(u32 r) +{ + return (r >> 0U) & 0x1U; +} +static inline u32 ltc_ltc0_ltss_tstg_cmgmt1_clean_pending_v(void) +{ + return 0x00000001U; +} +static inline u32 ltc_ltc0_ltss_tstg_cmgmt1_clean_pending_f(void) +{ + return 0x1U; +} +static inline u32 ltc_ltc1_ltss_tstg_cmgmt0_r(void) +{ + return 0x001422a0U; +} +static inline u32 ltc_ltc1_ltss_tstg_cmgmt0_invalidate_v(u32 r) +{ + return (r >> 0U) & 0x1U; +} +static inline u32 ltc_ltc1_ltss_tstg_cmgmt0_invalidate_pending_v(void) +{ + return 0x00000001U; +} +static inline u32 ltc_ltc1_ltss_tstg_cmgmt0_invalidate_pending_f(void) +{ + return 0x1U; +} +static inline u32 ltc_ltc1_ltss_tstg_cmgmt1_r(void) +{ + return 0x001422a4U; +} +static inline u32 ltc_ltc1_ltss_tstg_cmgmt1_clean_v(u32 r) +{ + return (r >> 0U) & 0x1U; +} +static inline u32 ltc_ltc1_ltss_tstg_cmgmt1_clean_pending_v(void) +{ + return 0x00000001U; +} +static inline u32 ltc_ltc1_ltss_tstg_cmgmt1_clean_pending_f(void) +{ + return 0x1U; +} +static inline u32 ltc_ltc0_lts0_tstg_info_1_r(void) +{ + return 0x0014058cU; +} +static inline u32 ltc_ltc0_lts0_tstg_info_1_slice_size_in_kb_v(u32 r) +{ + return (r >> 0U) & 0xffffU; +} 
+static inline u32 ltc_ltc0_lts0_tstg_info_1_slices_per_l2_v(u32 r) +{ + return (r >> 16U) & 0x1fU; +} +#endif diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_mc_gv100.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_mc_gv100.h new file mode 100644 index 000000000..f367991e2 --- /dev/null +++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_mc_gv100.h @@ -0,0 +1,259 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/* + * Function naming determines intended use: + * + * _r(void) : Returns the offset for register . + * + * _o(void) : Returns the offset for element . + * + * _w(void) : Returns the word offset for word (4 byte) element . + * + * __s(void) : Returns size of field of register in bits. + * + * __f(u32 v) : Returns a value based on 'v' which has been shifted + * and masked to place it at field of register . 
This value + * can be |'d with others to produce a full register value for + * register . + * + * __m(void) : Returns a mask for field of register . This + * value can be ~'d and then &'d to clear the value of field for + * register . + * + * ___f(void) : Returns the constant value after being shifted + * to place it at field of register . This value can be |'d + * with others to produce a full register value for . + * + * __v(u32 r) : Returns the value of field from a full register + * value 'r' after being shifted to place its LSB at bit 0. + * This value is suitable for direct comparison with other unshifted + * values appropriate for use in field of register . + * + * ___v(void) : Returns the constant value for defined for + * field of register . This value is suitable for direct + * comparison with unshifted values appropriate for use in field + * of register . + */ +#ifndef _hw_mc_gv100_h_ +#define _hw_mc_gv100_h_ + +static inline u32 mc_boot_0_r(void) +{ + return 0x00000000U; +} +static inline u32 mc_boot_0_architecture_v(u32 r) +{ + return (r >> 24U) & 0x1fU; +} +static inline u32 mc_boot_0_implementation_v(u32 r) +{ + return (r >> 20U) & 0xfU; +} +static inline u32 mc_boot_0_major_revision_v(u32 r) +{ + return (r >> 4U) & 0xfU; +} +static inline u32 mc_boot_0_minor_revision_v(u32 r) +{ + return (r >> 0U) & 0xfU; +} +static inline u32 mc_intr_r(u32 i) +{ + return 0x00000100U + i*4U; +} +static inline u32 mc_intr_pfifo_pending_f(void) +{ + return 0x100U; +} +static inline u32 mc_intr_hub_pending_f(void) +{ + return 0x200U; +} +static inline u32 mc_intr_pgraph_pending_f(void) +{ + return 0x1000U; +} +static inline u32 mc_intr_pmu_pending_f(void) +{ + return 0x1000000U; +} +static inline u32 mc_intr_ltc_pending_f(void) +{ + return 0x2000000U; +} +static inline u32 mc_intr_priv_ring_pending_f(void) +{ + return 0x40000000U; +} +static inline u32 mc_intr_pbus_pending_f(void) +{ + return 0x10000000U; +} +static inline u32 mc_intr_en_r(u32 i) +{ + return 
0x00000140U + i*4U; +} +static inline u32 mc_intr_en_set_r(u32 i) +{ + return 0x00000160U + i*4U; +} +static inline u32 mc_intr_en_clear_r(u32 i) +{ + return 0x00000180U + i*4U; +} +static inline u32 mc_enable_r(void) +{ + return 0x00000200U; +} +static inline u32 mc_enable_xbar_enabled_f(void) +{ + return 0x4U; +} +static inline u32 mc_enable_l2_enabled_f(void) +{ + return 0x8U; +} +static inline u32 mc_enable_pmedia_s(void) +{ + return 1U; +} +static inline u32 mc_enable_pmedia_f(u32 v) +{ + return (v & 0x1U) << 4U; +} +static inline u32 mc_enable_pmedia_m(void) +{ + return 0x1U << 4U; +} +static inline u32 mc_enable_pmedia_v(u32 r) +{ + return (r >> 4U) & 0x1U; +} +static inline u32 mc_enable_ce0_m(void) +{ + return 0x1U << 6U; +} +static inline u32 mc_enable_pfifo_enabled_f(void) +{ + return 0x100U; +} +static inline u32 mc_enable_pgraph_enabled_f(void) +{ + return 0x1000U; +} +static inline u32 mc_enable_pwr_v(u32 r) +{ + return (r >> 13U) & 0x1U; +} +static inline u32 mc_enable_pwr_disabled_v(void) +{ + return 0x00000000U; +} +static inline u32 mc_enable_pwr_enabled_f(void) +{ + return 0x2000U; +} +static inline u32 mc_enable_pfb_enabled_f(void) +{ + return 0x100000U; +} +static inline u32 mc_enable_ce2_m(void) +{ + return 0x1U << 21U; +} +static inline u32 mc_enable_ce2_enabled_f(void) +{ + return 0x200000U; +} +static inline u32 mc_enable_blg_enabled_f(void) +{ + return 0x8000000U; +} +static inline u32 mc_enable_perfmon_enabled_f(void) +{ + return 0x10000000U; +} +static inline u32 mc_enable_hub_enabled_f(void) +{ + return 0x20000000U; +} +static inline u32 mc_enable_nvdec_disabled_v(void) +{ + return 0x00000000U; +} +static inline u32 mc_enable_nvdec_enabled_f(void) +{ + return 0x8000U; +} +static inline u32 mc_intr_ltc_r(void) +{ + return 0x000001c0U; +} +static inline u32 mc_enable_pb_r(void) +{ + return 0x00000204U; +} +static inline u32 mc_enable_pb_0_s(void) +{ + return 1U; +} +static inline u32 mc_enable_pb_0_f(u32 v) +{ + return (v & 0x1U) << 0U; 
+} +static inline u32 mc_enable_pb_0_m(void) +{ + return 0x1U << 0U; +} +static inline u32 mc_enable_pb_0_v(u32 r) +{ + return (r >> 0U) & 0x1U; +} +static inline u32 mc_enable_pb_0_enabled_v(void) +{ + return 0x00000001U; +} +static inline u32 mc_enable_pb_sel_f(u32 v, u32 i) +{ + return (v & 0x1U) << (0U + i*1U); +} +static inline u32 mc_elpg_enable_r(void) +{ + return 0x0000020cU; +} +static inline u32 mc_elpg_enable_xbar_enabled_f(void) +{ + return 0x4U; +} +static inline u32 mc_elpg_enable_pfb_enabled_f(void) +{ + return 0x100000U; +} +static inline u32 mc_elpg_enable_hub_enabled_f(void) +{ + return 0x20000000U; +} +static inline u32 mc_elpg_enable_l2_enabled_f(void) +{ + return 0x8U; +} +#endif diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_pbdma_gv100.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_pbdma_gv100.h new file mode 100644 index 000000000..66a0737c6 --- /dev/null +++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_pbdma_gv100.h @@ -0,0 +1,659 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/* + * Function naming determines intended use: + * + * _r(void) : Returns the offset for register . + * + * _o(void) : Returns the offset for element . + * + * _w(void) : Returns the word offset for word (4 byte) element . + * + * __s(void) : Returns size of field of register in bits. + * + * __f(u32 v) : Returns a value based on 'v' which has been shifted + * and masked to place it at field of register . This value + * can be |'d with others to produce a full register value for + * register . + * + * __m(void) : Returns a mask for field of register . This + * value can be ~'d and then &'d to clear the value of field for + * register . + * + * ___f(void) : Returns the constant value after being shifted + * to place it at field of register . This value can be |'d + * with others to produce a full register value for . + * + * __v(u32 r) : Returns the value of field from a full register + * value 'r' after being shifted to place its LSB at bit 0. + * This value is suitable for direct comparison with other unshifted + * values appropriate for use in field of register . + * + * ___v(void) : Returns the constant value for defined for + * field of register . This value is suitable for direct + * comparison with unshifted values appropriate for use in field + * of register . 
+ */ +#ifndef _hw_pbdma_gv100_h_ +#define _hw_pbdma_gv100_h_ + +static inline u32 pbdma_gp_entry1_r(void) +{ + return 0x10000004U; +} +static inline u32 pbdma_gp_entry1_get_hi_v(u32 r) +{ + return (r >> 0U) & 0xffU; +} +static inline u32 pbdma_gp_entry1_length_f(u32 v) +{ + return (v & 0x1fffffU) << 10U; +} +static inline u32 pbdma_gp_entry1_length_v(u32 r) +{ + return (r >> 10U) & 0x1fffffU; +} +static inline u32 pbdma_gp_base_r(u32 i) +{ + return 0x00040048U + i*8192U; +} +static inline u32 pbdma_gp_base__size_1_v(void) +{ + return 0x0000000eU; +} +static inline u32 pbdma_gp_base_offset_f(u32 v) +{ + return (v & 0x1fffffffU) << 3U; +} +static inline u32 pbdma_gp_base_rsvd_s(void) +{ + return 3U; +} +static inline u32 pbdma_gp_base_hi_r(u32 i) +{ + return 0x0004004cU + i*8192U; +} +static inline u32 pbdma_gp_base_hi_offset_f(u32 v) +{ + return (v & 0xffU) << 0U; +} +static inline u32 pbdma_gp_base_hi_limit2_f(u32 v) +{ + return (v & 0x1fU) << 16U; +} +static inline u32 pbdma_gp_fetch_r(u32 i) +{ + return 0x00040050U + i*8192U; +} +static inline u32 pbdma_gp_get_r(u32 i) +{ + return 0x00040014U + i*8192U; +} +static inline u32 pbdma_gp_put_r(u32 i) +{ + return 0x00040000U + i*8192U; +} +static inline u32 pbdma_pb_fetch_r(u32 i) +{ + return 0x00040054U + i*8192U; +} +static inline u32 pbdma_pb_fetch_hi_r(u32 i) +{ + return 0x00040058U + i*8192U; +} +static inline u32 pbdma_get_r(u32 i) +{ + return 0x00040018U + i*8192U; +} +static inline u32 pbdma_get_hi_r(u32 i) +{ + return 0x0004001cU + i*8192U; +} +static inline u32 pbdma_put_r(u32 i) +{ + return 0x0004005cU + i*8192U; +} +static inline u32 pbdma_put_hi_r(u32 i) +{ + return 0x00040060U + i*8192U; +} +static inline u32 pbdma_pb_header_r(u32 i) +{ + return 0x00040084U + i*8192U; +} +static inline u32 pbdma_pb_header_priv_user_f(void) +{ + return 0x0U; +} +static inline u32 pbdma_pb_header_method_zero_f(void) +{ + return 0x0U; +} +static inline u32 pbdma_pb_header_subchannel_zero_f(void) +{ + return 0x0U; +} +static 
inline u32 pbdma_pb_header_level_main_f(void) +{ + return 0x0U; +} +static inline u32 pbdma_pb_header_first_true_f(void) +{ + return 0x400000U; +} +static inline u32 pbdma_pb_header_type_inc_f(void) +{ + return 0x20000000U; +} +static inline u32 pbdma_pb_header_type_non_inc_f(void) +{ + return 0x60000000U; +} +static inline u32 pbdma_hdr_shadow_r(u32 i) +{ + return 0x00040118U + i*8192U; +} +static inline u32 pbdma_gp_shadow_0_r(u32 i) +{ + return 0x00040110U + i*8192U; +} +static inline u32 pbdma_gp_shadow_1_r(u32 i) +{ + return 0x00040114U + i*8192U; +} +static inline u32 pbdma_subdevice_r(u32 i) +{ + return 0x00040094U + i*8192U; +} +static inline u32 pbdma_subdevice_id_f(u32 v) +{ + return (v & 0xfffU) << 0U; +} +static inline u32 pbdma_subdevice_status_active_f(void) +{ + return 0x10000000U; +} +static inline u32 pbdma_subdevice_channel_dma_enable_f(void) +{ + return 0x20000000U; +} +static inline u32 pbdma_method0_r(u32 i) +{ + return 0x000400c0U + i*8192U; +} +static inline u32 pbdma_method0_fifo_size_v(void) +{ + return 0x00000004U; +} +static inline u32 pbdma_method0_addr_f(u32 v) +{ + return (v & 0xfffU) << 2U; +} +static inline u32 pbdma_method0_addr_v(u32 r) +{ + return (r >> 2U) & 0xfffU; +} +static inline u32 pbdma_method0_subch_v(u32 r) +{ + return (r >> 16U) & 0x7U; +} +static inline u32 pbdma_method0_first_true_f(void) +{ + return 0x400000U; +} +static inline u32 pbdma_method0_valid_true_f(void) +{ + return 0x80000000U; +} +static inline u32 pbdma_method1_r(u32 i) +{ + return 0x000400c8U + i*8192U; +} +static inline u32 pbdma_method2_r(u32 i) +{ + return 0x000400d0U + i*8192U; +} +static inline u32 pbdma_method3_r(u32 i) +{ + return 0x000400d8U + i*8192U; +} +static inline u32 pbdma_data0_r(u32 i) +{ + return 0x000400c4U + i*8192U; +} +static inline u32 pbdma_acquire_r(u32 i) +{ + return 0x00040030U + i*8192U; +} +static inline u32 pbdma_acquire_retry_man_2_f(void) +{ + return 0x2U; +} +static inline u32 pbdma_acquire_retry_exp_2_f(void) +{ + 
return 0x100U; +} +static inline u32 pbdma_acquire_timeout_exp_f(u32 v) +{ + return (v & 0xfU) << 11U; +} +static inline u32 pbdma_acquire_timeout_exp_max_v(void) +{ + return 0x0000000fU; +} +static inline u32 pbdma_acquire_timeout_exp_max_f(void) +{ + return 0x7800U; +} +static inline u32 pbdma_acquire_timeout_man_f(u32 v) +{ + return (v & 0xffffU) << 15U; +} +static inline u32 pbdma_acquire_timeout_man_max_v(void) +{ + return 0x0000ffffU; +} +static inline u32 pbdma_acquire_timeout_man_max_f(void) +{ + return 0x7fff8000U; +} +static inline u32 pbdma_acquire_timeout_en_enable_f(void) +{ + return 0x80000000U; +} +static inline u32 pbdma_acquire_timeout_en_disable_f(void) +{ + return 0x0U; +} +static inline u32 pbdma_status_r(u32 i) +{ + return 0x00040100U + i*8192U; +} +static inline u32 pbdma_channel_r(u32 i) +{ + return 0x00040120U + i*8192U; +} +static inline u32 pbdma_signature_r(u32 i) +{ + return 0x00040010U + i*8192U; +} +static inline u32 pbdma_signature_hw_valid_f(void) +{ + return 0xfaceU; +} +static inline u32 pbdma_signature_sw_zero_f(void) +{ + return 0x0U; +} +static inline u32 pbdma_userd_r(u32 i) +{ + return 0x00040008U + i*8192U; +} +static inline u32 pbdma_userd_target_vid_mem_f(void) +{ + return 0x0U; +} +static inline u32 pbdma_userd_target_sys_mem_coh_f(void) +{ + return 0x2U; +} +static inline u32 pbdma_userd_target_sys_mem_ncoh_f(void) +{ + return 0x3U; +} +static inline u32 pbdma_userd_addr_f(u32 v) +{ + return (v & 0x7fffffU) << 9U; +} +static inline u32 pbdma_config_r(u32 i) +{ + return 0x000400f4U + i*8192U; +} +static inline u32 pbdma_config_l2_evict_first_f(void) +{ + return 0x0U; +} +static inline u32 pbdma_config_l2_evict_normal_f(void) +{ + return 0x1U; +} +static inline u32 pbdma_config_l2_evict_last_f(void) +{ + return 0x2U; +} +static inline u32 pbdma_config_ce_split_enable_f(void) +{ + return 0x0U; +} +static inline u32 pbdma_config_ce_split_disable_f(void) +{ + return 0x10U; +} +static inline u32 
pbdma_config_auth_level_non_privileged_f(void) +{ + return 0x0U; +} +static inline u32 pbdma_config_auth_level_privileged_f(void) +{ + return 0x100U; +} +static inline u32 pbdma_config_userd_writeback_disable_f(void) +{ + return 0x0U; +} +static inline u32 pbdma_config_userd_writeback_enable_f(void) +{ + return 0x1000U; +} +static inline u32 pbdma_userd_hi_r(u32 i) +{ + return 0x0004000cU + i*8192U; +} +static inline u32 pbdma_userd_hi_addr_f(u32 v) +{ + return (v & 0xffU) << 0U; +} +static inline u32 pbdma_hce_ctrl_r(u32 i) +{ + return 0x000400e4U + i*8192U; +} +static inline u32 pbdma_hce_ctrl_hce_priv_mode_yes_f(void) +{ + return 0x20U; +} +static inline u32 pbdma_intr_0_r(u32 i) +{ + return 0x00040108U + i*8192U; +} +static inline u32 pbdma_intr_0_memreq_v(u32 r) +{ + return (r >> 0U) & 0x1U; +} +static inline u32 pbdma_intr_0_memreq_pending_f(void) +{ + return 0x1U; +} +static inline u32 pbdma_intr_0_memack_timeout_pending_f(void) +{ + return 0x2U; +} +static inline u32 pbdma_intr_0_memack_extra_pending_f(void) +{ + return 0x4U; +} +static inline u32 pbdma_intr_0_memdat_timeout_pending_f(void) +{ + return 0x8U; +} +static inline u32 pbdma_intr_0_memdat_extra_pending_f(void) +{ + return 0x10U; +} +static inline u32 pbdma_intr_0_memflush_pending_f(void) +{ + return 0x20U; +} +static inline u32 pbdma_intr_0_memop_pending_f(void) +{ + return 0x40U; +} +static inline u32 pbdma_intr_0_lbconnect_pending_f(void) +{ + return 0x80U; +} +static inline u32 pbdma_intr_0_lbreq_pending_f(void) +{ + return 0x100U; +} +static inline u32 pbdma_intr_0_lback_timeout_pending_f(void) +{ + return 0x200U; +} +static inline u32 pbdma_intr_0_lback_extra_pending_f(void) +{ + return 0x400U; +} +static inline u32 pbdma_intr_0_lbdat_timeout_pending_f(void) +{ + return 0x800U; +} +static inline u32 pbdma_intr_0_lbdat_extra_pending_f(void) +{ + return 0x1000U; +} +static inline u32 pbdma_intr_0_gpfifo_pending_f(void) +{ + return 0x2000U; +} +static inline u32 
pbdma_intr_0_gpptr_pending_f(void) +{ + return 0x4000U; +} +static inline u32 pbdma_intr_0_gpentry_pending_f(void) +{ + return 0x8000U; +} +static inline u32 pbdma_intr_0_gpcrc_pending_f(void) +{ + return 0x10000U; +} +static inline u32 pbdma_intr_0_pbptr_pending_f(void) +{ + return 0x20000U; +} +static inline u32 pbdma_intr_0_pbentry_pending_f(void) +{ + return 0x40000U; +} +static inline u32 pbdma_intr_0_pbcrc_pending_f(void) +{ + return 0x80000U; +} +static inline u32 pbdma_intr_0_clear_faulted_error_pending_f(void) +{ + return 0x100000U; +} +static inline u32 pbdma_intr_0_method_pending_f(void) +{ + return 0x200000U; +} +static inline u32 pbdma_intr_0_methodcrc_pending_f(void) +{ + return 0x400000U; +} +static inline u32 pbdma_intr_0_device_pending_f(void) +{ + return 0x800000U; +} +static inline u32 pbdma_intr_0_eng_reset_pending_f(void) +{ + return 0x1000000U; +} +static inline u32 pbdma_intr_0_semaphore_pending_f(void) +{ + return 0x2000000U; +} +static inline u32 pbdma_intr_0_acquire_pending_f(void) +{ + return 0x4000000U; +} +static inline u32 pbdma_intr_0_pri_pending_f(void) +{ + return 0x8000000U; +} +static inline u32 pbdma_intr_0_no_ctxsw_seg_pending_f(void) +{ + return 0x20000000U; +} +static inline u32 pbdma_intr_0_pbseg_pending_f(void) +{ + return 0x40000000U; +} +static inline u32 pbdma_intr_0_signature_pending_f(void) +{ + return 0x80000000U; +} +static inline u32 pbdma_intr_1_r(u32 i) +{ + return 0x00040148U + i*8192U; +} +static inline u32 pbdma_intr_1_ctxnotvalid_m(void) +{ + return 0x1U << 31U; +} +static inline u32 pbdma_intr_1_ctxnotvalid_pending_f(void) +{ + return 0x80000000U; +} +static inline u32 pbdma_intr_en_0_r(u32 i) +{ + return 0x0004010cU + i*8192U; +} +static inline u32 pbdma_intr_en_0_lbreq_enabled_f(void) +{ + return 0x100U; +} +static inline u32 pbdma_intr_en_1_r(u32 i) +{ + return 0x0004014cU + i*8192U; +} +static inline u32 pbdma_intr_stall_r(u32 i) +{ + return 0x0004013cU + i*8192U; +} +static inline u32 
pbdma_intr_stall_lbreq_enabled_f(void) +{ + return 0x100U; +} +static inline u32 pbdma_intr_stall_1_r(u32 i) +{ + return 0x00040140U + i*8192U; +} +static inline u32 pbdma_udma_nop_r(void) +{ + return 0x00000008U; +} +static inline u32 pbdma_runlist_timeslice_r(u32 i) +{ + return 0x000400f8U + i*8192U; +} +static inline u32 pbdma_runlist_timeslice_timeout_128_f(void) +{ + return 0x80U; +} +static inline u32 pbdma_runlist_timeslice_timescale_3_f(void) +{ + return 0x3000U; +} +static inline u32 pbdma_runlist_timeslice_enable_true_f(void) +{ + return 0x10000000U; +} +static inline u32 pbdma_target_r(u32 i) +{ + return 0x000400acU + i*8192U; +} +static inline u32 pbdma_target_engine_sw_f(void) +{ + return 0x1fU; +} +static inline u32 pbdma_target_eng_ctx_valid_true_f(void) +{ + return 0x10000U; +} +static inline u32 pbdma_target_eng_ctx_valid_false_f(void) +{ + return 0x0U; +} +static inline u32 pbdma_target_ce_ctx_valid_true_f(void) +{ + return 0x20000U; +} +static inline u32 pbdma_target_ce_ctx_valid_false_f(void) +{ + return 0x0U; +} +static inline u32 pbdma_target_host_tsg_event_reason_pbdma_idle_f(void) +{ + return 0x0U; +} +static inline u32 pbdma_target_host_tsg_event_reason_semaphore_acquire_failure_f(void) +{ + return 0x1000000U; +} +static inline u32 pbdma_target_host_tsg_event_reason_tsg_yield_f(void) +{ + return 0x2000000U; +} +static inline u32 pbdma_target_host_tsg_event_reason_host_subchannel_switch_f(void) +{ + return 0x3000000U; +} +static inline u32 pbdma_target_should_send_tsg_event_true_f(void) +{ + return 0x20000000U; +} +static inline u32 pbdma_target_should_send_tsg_event_false_f(void) +{ + return 0x0U; +} +static inline u32 pbdma_target_needs_host_tsg_event_true_f(void) +{ + return 0x80000000U; +} +static inline u32 pbdma_target_needs_host_tsg_event_false_f(void) +{ + return 0x0U; +} +static inline u32 pbdma_set_channel_info_r(u32 i) +{ + return 0x000400fcU + i*8192U; +} +static inline u32 
pbdma_set_channel_info_scg_type_graphics_compute0_f(void) +{ + return 0x0U; +} +static inline u32 pbdma_set_channel_info_scg_type_compute1_f(void) +{ + return 0x1U; +} +static inline u32 pbdma_set_channel_info_veid_f(u32 v) +{ + return (v & 0x3fU) << 8U; +} +static inline u32 pbdma_timeout_r(u32 i) +{ + return 0x0004012cU + i*8192U; +} +static inline u32 pbdma_timeout_period_m(void) +{ + return 0xffffffffU << 0U; +} +static inline u32 pbdma_timeout_period_max_f(void) +{ + return 0xffffffffU; +} +static inline u32 pbdma_timeout_period_init_f(void) +{ + return 0x10000U; +} +#endif diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_perf_gv100.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_perf_gv100.h new file mode 100644 index 000000000..4fbe37cbf --- /dev/null +++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_perf_gv100.h @@ -0,0 +1,211 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/* + * Function naming determines intended use: + * + * <x>_r(void) : Returns the offset for register <x>. + * + * <x>_o(void) : Returns the offset for element <x>. + * + * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. + * + * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. + * + * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted + * and masked to place it at field <y> of register <x>. This value + * can be |'d with others to produce a full register value for + * register <x>. + * + * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This + * value can be ~'d and then &'d to clear the value of field <y> for + * register <x>. + * + * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted + * to place it at field <y> of register <x>. This value can be |'d + * with others to produce a full register value for <x>. + * + * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register + * value 'r' after being shifted to place its LSB at bit 0. + * This value is suitable for direct comparison with other unshifted + * values appropriate for use in field <y> of register <x>. + * + * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for + * field <y> of register <x>. This value is suitable for direct + * comparison with unshifted values appropriate for use in field <y> + * of register <x>. 
+ */ +#ifndef _hw_perf_gv100_h_ +#define _hw_perf_gv100_h_ + +static inline u32 perf_pmasys_control_r(void) +{ + return 0x0024a000U; +} +static inline u32 perf_pmasys_control_membuf_status_v(u32 r) +{ + return (r >> 4U) & 0x1U; +} +static inline u32 perf_pmasys_control_membuf_status_overflowed_v(void) +{ + return 0x00000001U; +} +static inline u32 perf_pmasys_control_membuf_status_overflowed_f(void) +{ + return 0x10U; +} +static inline u32 perf_pmasys_control_membuf_clear_status_f(u32 v) +{ + return (v & 0x1U) << 5U; +} +static inline u32 perf_pmasys_control_membuf_clear_status_v(u32 r) +{ + return (r >> 5U) & 0x1U; +} +static inline u32 perf_pmasys_control_membuf_clear_status_doit_v(void) +{ + return 0x00000001U; +} +static inline u32 perf_pmasys_control_membuf_clear_status_doit_f(void) +{ + return 0x20U; +} +static inline u32 perf_pmasys_mem_block_r(void) +{ + return 0x0024a070U; +} +static inline u32 perf_pmasys_mem_block_base_f(u32 v) +{ + return (v & 0xfffffffU) << 0U; +} +static inline u32 perf_pmasys_mem_block_target_f(u32 v) +{ + return (v & 0x3U) << 28U; +} +static inline u32 perf_pmasys_mem_block_target_v(u32 r) +{ + return (r >> 28U) & 0x3U; +} +static inline u32 perf_pmasys_mem_block_target_lfb_v(void) +{ + return 0x00000000U; +} +static inline u32 perf_pmasys_mem_block_target_lfb_f(void) +{ + return 0x0U; +} +static inline u32 perf_pmasys_mem_block_target_sys_coh_v(void) +{ + return 0x00000002U; +} +static inline u32 perf_pmasys_mem_block_target_sys_coh_f(void) +{ + return 0x20000000U; +} +static inline u32 perf_pmasys_mem_block_target_sys_ncoh_v(void) +{ + return 0x00000003U; +} +static inline u32 perf_pmasys_mem_block_target_sys_ncoh_f(void) +{ + return 0x30000000U; +} +static inline u32 perf_pmasys_mem_block_valid_f(u32 v) +{ + return (v & 0x1U) << 31U; +} +static inline u32 perf_pmasys_mem_block_valid_v(u32 r) +{ + return (r >> 31U) & 0x1U; +} +static inline u32 perf_pmasys_mem_block_valid_true_v(void) +{ + return 0x00000001U; +} +static inline u32 
perf_pmasys_mem_block_valid_true_f(void) +{ + return 0x80000000U; +} +static inline u32 perf_pmasys_mem_block_valid_false_v(void) +{ + return 0x00000000U; +} +static inline u32 perf_pmasys_mem_block_valid_false_f(void) +{ + return 0x0U; +} +static inline u32 perf_pmasys_outbase_r(void) +{ + return 0x0024a074U; +} +static inline u32 perf_pmasys_outbase_ptr_f(u32 v) +{ + return (v & 0x7ffffffU) << 5U; +} +static inline u32 perf_pmasys_outbaseupper_r(void) +{ + return 0x0024a078U; +} +static inline u32 perf_pmasys_outbaseupper_ptr_f(u32 v) +{ + return (v & 0xffU) << 0U; +} +static inline u32 perf_pmasys_outsize_r(void) +{ + return 0x0024a07cU; +} +static inline u32 perf_pmasys_outsize_numbytes_f(u32 v) +{ + return (v & 0x7ffffffU) << 5U; +} +static inline u32 perf_pmasys_mem_bytes_r(void) +{ + return 0x0024a084U; +} +static inline u32 perf_pmasys_mem_bytes_numbytes_f(u32 v) +{ + return (v & 0xfffffffU) << 4U; +} +static inline u32 perf_pmasys_mem_bump_r(void) +{ + return 0x0024a088U; +} +static inline u32 perf_pmasys_mem_bump_numbytes_f(u32 v) +{ + return (v & 0xfffffffU) << 4U; +} +static inline u32 perf_pmasys_enginestatus_r(void) +{ + return 0x0024a0a4U; +} +static inline u32 perf_pmasys_enginestatus_rbufempty_f(u32 v) +{ + return (v & 0x1U) << 4U; +} +static inline u32 perf_pmasys_enginestatus_rbufempty_empty_v(void) +{ + return 0x00000001U; +} +static inline u32 perf_pmasys_enginestatus_rbufempty_empty_f(void) +{ + return 0x10U; +} +#endif diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_pram_gv100.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_pram_gv100.h new file mode 100644 index 000000000..8f005a22f --- /dev/null +++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_pram_gv100.h @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/* + * Function naming determines intended use: + * + * _r(void) : Returns the offset for register . + * + * _o(void) : Returns the offset for element . + * + * _w(void) : Returns the word offset for word (4 byte) element . + * + * __s(void) : Returns size of field of register in bits. + * + * __f(u32 v) : Returns a value based on 'v' which has been shifted + * and masked to place it at field of register . This value + * can be |'d with others to produce a full register value for + * register . + * + * __m(void) : Returns a mask for field of register . This + * value can be ~'d and then &'d to clear the value of field for + * register . + * + * ___f(void) : Returns the constant value after being shifted + * to place it at field of register . This value can be |'d + * with others to produce a full register value for . 
+ * + * __v(u32 r) : Returns the value of field from a full register + * value 'r' after being shifted to place its LSB at bit 0. + * This value is suitable for direct comparison with other unshifted + * values appropriate for use in field of register . + * + * ___v(void) : Returns the constant value for defined for + * field of register . This value is suitable for direct + * comparison with unshifted values appropriate for use in field + * of register . + */ +#ifndef _hw_pram_gv100_h_ +#define _hw_pram_gv100_h_ + +static inline u32 pram_data032_r(u32 i) +{ + return 0x00700000U + i*4U; +} +#endif diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_pri_ringmaster_gv100.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_pri_ringmaster_gv100.h new file mode 100644 index 000000000..5eca93cc6 --- /dev/null +++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_pri_ringmaster_gv100.h @@ -0,0 +1,167 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/* + * Function naming determines intended use: + * + * <x>_r(void) : Returns the offset for register <x>. + * + * <x>_o(void) : Returns the offset for element <x>. + * + * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. + * + * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. + * + * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted + * and masked to place it at field <y> of register <x>. This value + * can be |'d with others to produce a full register value for + * register <x>. + * + * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This + * value can be ~'d and then &'d to clear the value of field <y> for + * register <x>. + * + * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted + * to place it at field <y> of register <x>. This value can be |'d + * with others to produce a full register value for <x>. + * + * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register + * value 'r' after being shifted to place its LSB at bit 0. + * This value is suitable for direct comparison with other unshifted + * values appropriate for use in field <y> of register <x>. + * + * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for + * field <y> of register <x>. This value is suitable for direct + * comparison with unshifted values appropriate for use in field <y> + * of register <x>. 
+ */ +#ifndef _hw_pri_ringmaster_gv100_h_ +#define _hw_pri_ringmaster_gv100_h_ + +static inline u32 pri_ringmaster_command_r(void) +{ + return 0x0012004cU; +} +static inline u32 pri_ringmaster_command_cmd_m(void) +{ + return 0x3fU << 0U; +} +static inline u32 pri_ringmaster_command_cmd_v(u32 r) +{ + return (r >> 0U) & 0x3fU; +} +static inline u32 pri_ringmaster_command_cmd_no_cmd_v(void) +{ + return 0x00000000U; +} +static inline u32 pri_ringmaster_command_cmd_start_ring_f(void) +{ + return 0x1U; +} +static inline u32 pri_ringmaster_command_cmd_ack_interrupt_f(void) +{ + return 0x2U; +} +static inline u32 pri_ringmaster_command_cmd_enumerate_stations_f(void) +{ + return 0x3U; +} +static inline u32 pri_ringmaster_command_cmd_enumerate_stations_bc_grp_all_f(void) +{ + return 0x0U; +} +static inline u32 pri_ringmaster_command_data_r(void) +{ + return 0x00120048U; +} +static inline u32 pri_ringmaster_start_results_r(void) +{ + return 0x00120050U; +} +static inline u32 pri_ringmaster_start_results_connectivity_v(u32 r) +{ + return (r >> 0U) & 0x1U; +} +static inline u32 pri_ringmaster_start_results_connectivity_pass_v(void) +{ + return 0x00000001U; +} +static inline u32 pri_ringmaster_intr_status0_r(void) +{ + return 0x00120058U; +} +static inline u32 pri_ringmaster_intr_status0_ring_start_conn_fault_v(u32 r) +{ + return (r >> 0U) & 0x1U; +} +static inline u32 pri_ringmaster_intr_status0_disconnect_fault_v(u32 r) +{ + return (r >> 1U) & 0x1U; +} +static inline u32 pri_ringmaster_intr_status0_overflow_fault_v(u32 r) +{ + return (r >> 2U) & 0x1U; +} +static inline u32 pri_ringmaster_intr_status0_gbl_write_error_sys_v(u32 r) +{ + return (r >> 8U) & 0x1U; +} +static inline u32 pri_ringmaster_intr_status1_r(void) +{ + return 0x0012005cU; +} +static inline u32 pri_ringmaster_global_ctl_r(void) +{ + return 0x00120060U; +} +static inline u32 pri_ringmaster_global_ctl_ring_reset_asserted_f(void) +{ + return 0x1U; +} +static inline u32 
pri_ringmaster_global_ctl_ring_reset_deasserted_f(void) +{ + return 0x0U; +} +static inline u32 pri_ringmaster_enum_fbp_r(void) +{ + return 0x00120074U; +} +static inline u32 pri_ringmaster_enum_fbp_count_v(u32 r) +{ + return (r >> 0U) & 0x1fU; +} +static inline u32 pri_ringmaster_enum_gpc_r(void) +{ + return 0x00120078U; +} +static inline u32 pri_ringmaster_enum_gpc_count_v(u32 r) +{ + return (r >> 0U) & 0x1fU; +} +static inline u32 pri_ringmaster_enum_ltc_r(void) +{ + return 0x0012006cU; +} +static inline u32 pri_ringmaster_enum_ltc_count_v(u32 r) +{ + return (r >> 0U) & 0x1fU; +} +#endif diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_pri_ringstation_gpc_gv100.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_pri_ringstation_gpc_gv100.h new file mode 100644 index 000000000..fc522d51b --- /dev/null +++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_pri_ringstation_gpc_gv100.h @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/* + * Function naming determines intended use: + * + * <x>_r(void) : Returns the offset for register <x>. + * + * <x>_o(void) : Returns the offset for element <x>. + * + * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. + * + * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. + * + * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted + * and masked to place it at field <y> of register <x>. This value + * can be |'d with others to produce a full register value for + * register <x>. + * + * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This + * value can be ~'d and then &'d to clear the value of field <y> for + * register <x>. + * + * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted + * to place it at field <y> of register <x>. This value can be |'d + * with others to produce a full register value for <x>. + * + * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register + * value 'r' after being shifted to place its LSB at bit 0. + * This value is suitable for direct comparison with other unshifted + * values appropriate for use in field <y> of register <x>. + * + * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for + * field <y> of register <x>. This value is suitable for direct + * comparison with unshifted values appropriate for use in field <y> + * of register <x>. 
+ */ +#ifndef _hw_pri_ringstation_gpc_gv100_h_ +#define _hw_pri_ringstation_gpc_gv100_h_ + +static inline u32 pri_ringstation_gpc_master_config_r(u32 i) +{ + return 0x00128300U + i*4U; +} +static inline u32 pri_ringstation_gpc_gpc0_priv_error_adr_r(void) +{ + return 0x00128120U; +} +static inline u32 pri_ringstation_gpc_gpc0_priv_error_wrdat_r(void) +{ + return 0x00128124U; +} +static inline u32 pri_ringstation_gpc_gpc0_priv_error_info_r(void) +{ + return 0x00128128U; +} +static inline u32 pri_ringstation_gpc_gpc0_priv_error_code_r(void) +{ + return 0x0012812cU; +} +#endif diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_pri_ringstation_sys_gv100.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_pri_ringstation_sys_gv100.h new file mode 100644 index 000000000..885ea30a5 --- /dev/null +++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_pri_ringstation_sys_gv100.h @@ -0,0 +1,91 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/* + * Function naming determines intended use: + * + * <x>_r(void) : Returns the offset for register <x>. + * + * <x>_o(void) : Returns the offset for element <x>. + * + * <x>_w(void) : Returns the word offset for word (4 byte) element <x>. + * + * <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits. + * + * <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted + * and masked to place it at field <y> of register <x>. This value + * can be |'d with others to produce a full register value for + * register <x>. + * + * <x>_<y>_m(void) : Returns a mask for field <y> of register <x>. This + * value can be ~'d and then &'d to clear the value of field <y> for + * register <x>. + * + * <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted + * to place it at field <y> of register <x>. This value can be |'d + * with others to produce a full register value for <x>. + * + * <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register + * value 'r' after being shifted to place its LSB at bit 0. + * This value is suitable for direct comparison with other unshifted + * values appropriate for use in field <y> of register <x>. + * + * <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for + * field <y> of register <x>. This value is suitable for direct + * comparison with unshifted values appropriate for use in field <y> + * of register <x>. 
+ */ +#ifndef _hw_pri_ringstation_sys_gv100_h_ +#define _hw_pri_ringstation_sys_gv100_h_ + +static inline u32 pri_ringstation_sys_master_config_r(u32 i) +{ + return 0x00122300U + i*4U; +} +static inline u32 pri_ringstation_sys_decode_config_r(void) +{ + return 0x00122204U; +} +static inline u32 pri_ringstation_sys_decode_config_ring_m(void) +{ + return 0x7U << 0U; +} +static inline u32 pri_ringstation_sys_decode_config_ring_drop_on_ring_not_started_f(void) +{ + return 0x1U; +} +static inline u32 pri_ringstation_sys_priv_error_adr_r(void) +{ + return 0x00122120U; +} +static inline u32 pri_ringstation_sys_priv_error_wrdat_r(void) +{ + return 0x00122124U; +} +static inline u32 pri_ringstation_sys_priv_error_info_r(void) +{ + return 0x00122128U; +} +static inline u32 pri_ringstation_sys_priv_error_code_r(void) +{ + return 0x0012212cU; +} +#endif diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_proj_gv100.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_proj_gv100.h new file mode 100644 index 000000000..dc4c377dc --- /dev/null +++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_proj_gv100.h @@ -0,0 +1,195 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/* + * Function naming determines intended use: + * + * _r(void) : Returns the offset for register . + * + * _o(void) : Returns the offset for element . + * + * _w(void) : Returns the word offset for word (4 byte) element . + * + * __s(void) : Returns size of field of register in bits. + * + * __f(u32 v) : Returns a value based on 'v' which has been shifted + * and masked to place it at field of register . This value + * can be |'d with others to produce a full register value for + * register . + * + * __m(void) : Returns a mask for field of register . This + * value can be ~'d and then &'d to clear the value of field for + * register . + * + * ___f(void) : Returns the constant value after being shifted + * to place it at field of register . This value can be |'d + * with others to produce a full register value for . + * + * __v(u32 r) : Returns the value of field from a full register + * value 'r' after being shifted to place its LSB at bit 0. + * This value is suitable for direct comparison with other unshifted + * values appropriate for use in field of register . + * + * ___v(void) : Returns the constant value for defined for + * field of register . This value is suitable for direct + * comparison with unshifted values appropriate for use in field + * of register . 
+ */ +#ifndef _hw_proj_gv100_h_ +#define _hw_proj_gv100_h_ + +static inline u32 proj_gpc_base_v(void) +{ + return 0x00500000U; +} +static inline u32 proj_gpc_shared_base_v(void) +{ + return 0x00418000U; +} +static inline u32 proj_gpc_stride_v(void) +{ + return 0x00008000U; +} +static inline u32 proj_ltc_stride_v(void) +{ + return 0x00002000U; +} +static inline u32 proj_lts_stride_v(void) +{ + return 0x00000200U; +} +static inline u32 proj_fbpa_base_v(void) +{ + return 0x00900000U; +} +static inline u32 proj_fbpa_shared_base_v(void) +{ + return 0x009a0000U; +} +static inline u32 proj_fbpa_stride_v(void) +{ + return 0x00004000U; +} +static inline u32 proj_ppc_in_gpc_base_v(void) +{ + return 0x00003000U; +} +static inline u32 proj_ppc_in_gpc_shared_base_v(void) +{ + return 0x00003e00U; +} +static inline u32 proj_ppc_in_gpc_stride_v(void) +{ + return 0x00000200U; +} +static inline u32 proj_rop_base_v(void) +{ + return 0x00410000U; +} +static inline u32 proj_rop_shared_base_v(void) +{ + return 0x00408800U; +} +static inline u32 proj_rop_stride_v(void) +{ + return 0x00000400U; +} +static inline u32 proj_tpc_in_gpc_base_v(void) +{ + return 0x00004000U; +} +static inline u32 proj_tpc_in_gpc_stride_v(void) +{ + return 0x00000800U; +} +static inline u32 proj_tpc_in_gpc_shared_base_v(void) +{ + return 0x00001800U; +} +static inline u32 proj_smpc_base_v(void) +{ + return 0x00000200U; +} +static inline u32 proj_smpc_shared_base_v(void) +{ + return 0x00000300U; +} +static inline u32 proj_smpc_unique_base_v(void) +{ + return 0x00000600U; +} +static inline u32 proj_smpc_stride_v(void) +{ + return 0x00000100U; +} +static inline u32 proj_host_num_engines_v(void) +{ + return 0x0000000fU; +} +static inline u32 proj_host_num_pbdma_v(void) +{ + return 0x0000000eU; +} +static inline u32 proj_scal_litter_num_tpc_per_gpc_v(void) +{ + return 0x00000007U; +} +static inline u32 proj_scal_litter_num_fbps_v(void) +{ + return 0x00000008U; +} +static inline u32 proj_scal_litter_num_fbpas_v(void) 
+{ + return 0x00000010U; +} +static inline u32 proj_scal_litter_num_gpcs_v(void) +{ + return 0x00000006U; +} +static inline u32 proj_scal_litter_num_pes_per_gpc_v(void) +{ + return 0x00000003U; +} +static inline u32 proj_scal_litter_num_tpcs_per_pes_v(void) +{ + return 0x00000003U; +} +static inline u32 proj_scal_litter_num_zcull_banks_v(void) +{ + return 0x00000004U; +} +static inline u32 proj_scal_litter_num_sm_per_tpc_v(void) +{ + return 0x00000002U; +} +static inline u32 proj_scal_max_gpcs_v(void) +{ + return 0x00000020U; +} +static inline u32 proj_scal_max_tpc_per_gpc_v(void) +{ + return 0x00000008U; +} +static inline u32 proj_sm_stride_v(void) +{ + return 0x00000080U; +} +#endif diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_pwr_gv100.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_pwr_gv100.h new file mode 100644 index 000000000..4b0b03262 --- /dev/null +++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_pwr_gv100.h @@ -0,0 +1,935 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/* + * Function naming determines intended use: + * + * _r(void) : Returns the offset for register . + * + * _o(void) : Returns the offset for element . + * + * _w(void) : Returns the word offset for word (4 byte) element . + * + * __s(void) : Returns size of field of register in bits. + * + * __f(u32 v) : Returns a value based on 'v' which has been shifted + * and masked to place it at field of register . This value + * can be |'d with others to produce a full register value for + * register . + * + * __m(void) : Returns a mask for field of register . This + * value can be ~'d and then &'d to clear the value of field for + * register . + * + * ___f(void) : Returns the constant value after being shifted + * to place it at field of register . This value can be |'d + * with others to produce a full register value for . + * + * __v(u32 r) : Returns the value of field from a full register + * value 'r' after being shifted to place its LSB at bit 0. + * This value is suitable for direct comparison with other unshifted + * values appropriate for use in field of register . + * + * ___v(void) : Returns the constant value for defined for + * field of register . This value is suitable for direct + * comparison with unshifted values appropriate for use in field + * of register . 
+ */ +#ifndef _hw_pwr_gv100_h_ +#define _hw_pwr_gv100_h_ + +static inline u32 pwr_falcon_irqsset_r(void) +{ + return 0x0010a000U; +} +static inline u32 pwr_falcon_irqsset_swgen0_set_f(void) +{ + return 0x40U; +} +static inline u32 pwr_falcon_irqsclr_r(void) +{ + return 0x0010a004U; +} +static inline u32 pwr_falcon_irqstat_r(void) +{ + return 0x0010a008U; +} +static inline u32 pwr_falcon_irqstat_halt_true_f(void) +{ + return 0x10U; +} +static inline u32 pwr_falcon_irqstat_exterr_true_f(void) +{ + return 0x20U; +} +static inline u32 pwr_falcon_irqstat_swgen0_true_f(void) +{ + return 0x40U; +} +static inline u32 pwr_falcon_irqstat_ext_second_true_f(void) +{ + return 0x800U; +} +static inline u32 pwr_falcon_irqmode_r(void) +{ + return 0x0010a00cU; +} +static inline u32 pwr_falcon_irqmset_r(void) +{ + return 0x0010a010U; +} +static inline u32 pwr_falcon_irqmset_gptmr_f(u32 v) +{ + return (v & 0x1U) << 0U; +} +static inline u32 pwr_falcon_irqmset_wdtmr_f(u32 v) +{ + return (v & 0x1U) << 1U; +} +static inline u32 pwr_falcon_irqmset_mthd_f(u32 v) +{ + return (v & 0x1U) << 2U; +} +static inline u32 pwr_falcon_irqmset_ctxsw_f(u32 v) +{ + return (v & 0x1U) << 3U; +} +static inline u32 pwr_falcon_irqmset_halt_f(u32 v) +{ + return (v & 0x1U) << 4U; +} +static inline u32 pwr_falcon_irqmset_exterr_f(u32 v) +{ + return (v & 0x1U) << 5U; +} +static inline u32 pwr_falcon_irqmset_swgen0_f(u32 v) +{ + return (v & 0x1U) << 6U; +} +static inline u32 pwr_falcon_irqmset_swgen1_f(u32 v) +{ + return (v & 0x1U) << 7U; +} +static inline u32 pwr_falcon_irqmset_ext_f(u32 v) +{ + return (v & 0xffU) << 8U; +} +static inline u32 pwr_falcon_irqmset_ext_ctxe_f(u32 v) +{ + return (v & 0x1U) << 8U; +} +static inline u32 pwr_falcon_irqmset_ext_limitv_f(u32 v) +{ + return (v & 0x1U) << 9U; +} +static inline u32 pwr_falcon_irqmset_ext_second_f(u32 v) +{ + return (v & 0x1U) << 11U; +} +static inline u32 pwr_falcon_irqmset_ext_therm_f(u32 v) +{ + return (v & 0x1U) << 12U; +} +static inline u32 
pwr_falcon_irqmset_ext_miscio_f(u32 v) +{ + return (v & 0x1U) << 13U; +} +static inline u32 pwr_falcon_irqmset_ext_rttimer_f(u32 v) +{ + return (v & 0x1U) << 14U; +} +static inline u32 pwr_falcon_irqmclr_r(void) +{ + return 0x0010a014U; +} +static inline u32 pwr_falcon_irqmclr_gptmr_f(u32 v) +{ + return (v & 0x1U) << 0U; +} +static inline u32 pwr_falcon_irqmclr_wdtmr_f(u32 v) +{ + return (v & 0x1U) << 1U; +} +static inline u32 pwr_falcon_irqmclr_mthd_f(u32 v) +{ + return (v & 0x1U) << 2U; +} +static inline u32 pwr_falcon_irqmclr_ctxsw_f(u32 v) +{ + return (v & 0x1U) << 3U; +} +static inline u32 pwr_falcon_irqmclr_halt_f(u32 v) +{ + return (v & 0x1U) << 4U; +} +static inline u32 pwr_falcon_irqmclr_exterr_f(u32 v) +{ + return (v & 0x1U) << 5U; +} +static inline u32 pwr_falcon_irqmclr_swgen0_f(u32 v) +{ + return (v & 0x1U) << 6U; +} +static inline u32 pwr_falcon_irqmclr_swgen1_f(u32 v) +{ + return (v & 0x1U) << 7U; +} +static inline u32 pwr_falcon_irqmclr_ext_f(u32 v) +{ + return (v & 0xffU) << 8U; +} +static inline u32 pwr_falcon_irqmclr_ext_ctxe_f(u32 v) +{ + return (v & 0x1U) << 8U; +} +static inline u32 pwr_falcon_irqmclr_ext_limitv_f(u32 v) +{ + return (v & 0x1U) << 9U; +} +static inline u32 pwr_falcon_irqmclr_ext_second_f(u32 v) +{ + return (v & 0x1U) << 11U; +} +static inline u32 pwr_falcon_irqmclr_ext_therm_f(u32 v) +{ + return (v & 0x1U) << 12U; +} +static inline u32 pwr_falcon_irqmclr_ext_miscio_f(u32 v) +{ + return (v & 0x1U) << 13U; +} +static inline u32 pwr_falcon_irqmclr_ext_rttimer_f(u32 v) +{ + return (v & 0x1U) << 14U; +} +static inline u32 pwr_falcon_irqmask_r(void) +{ + return 0x0010a018U; +} +static inline u32 pwr_falcon_irqdest_r(void) +{ + return 0x0010a01cU; +} +static inline u32 pwr_falcon_irqdest_host_gptmr_f(u32 v) +{ + return (v & 0x1U) << 0U; +} +static inline u32 pwr_falcon_irqdest_host_wdtmr_f(u32 v) +{ + return (v & 0x1U) << 1U; +} +static inline u32 pwr_falcon_irqdest_host_mthd_f(u32 v) +{ + return (v & 0x1U) << 2U; +} +static inline 
u32 pwr_falcon_irqdest_host_ctxsw_f(u32 v) +{ + return (v & 0x1U) << 3U; +} +static inline u32 pwr_falcon_irqdest_host_halt_f(u32 v) +{ + return (v & 0x1U) << 4U; +} +static inline u32 pwr_falcon_irqdest_host_exterr_f(u32 v) +{ + return (v & 0x1U) << 5U; +} +static inline u32 pwr_falcon_irqdest_host_swgen0_f(u32 v) +{ + return (v & 0x1U) << 6U; +} +static inline u32 pwr_falcon_irqdest_host_swgen1_f(u32 v) +{ + return (v & 0x1U) << 7U; +} +static inline u32 pwr_falcon_irqdest_host_ext_f(u32 v) +{ + return (v & 0xffU) << 8U; +} +static inline u32 pwr_falcon_irqdest_host_ext_ctxe_f(u32 v) +{ + return (v & 0x1U) << 8U; +} +static inline u32 pwr_falcon_irqdest_host_ext_limitv_f(u32 v) +{ + return (v & 0x1U) << 9U; +} +static inline u32 pwr_falcon_irqdest_host_ext_second_f(u32 v) +{ + return (v & 0x1U) << 11U; +} +static inline u32 pwr_falcon_irqdest_host_ext_therm_f(u32 v) +{ + return (v & 0x1U) << 12U; +} +static inline u32 pwr_falcon_irqdest_host_ext_miscio_f(u32 v) +{ + return (v & 0x1U) << 13U; +} +static inline u32 pwr_falcon_irqdest_host_ext_rttimer_f(u32 v) +{ + return (v & 0x1U) << 14U; +} +static inline u32 pwr_falcon_irqdest_target_gptmr_f(u32 v) +{ + return (v & 0x1U) << 16U; +} +static inline u32 pwr_falcon_irqdest_target_wdtmr_f(u32 v) +{ + return (v & 0x1U) << 17U; +} +static inline u32 pwr_falcon_irqdest_target_mthd_f(u32 v) +{ + return (v & 0x1U) << 18U; +} +static inline u32 pwr_falcon_irqdest_target_ctxsw_f(u32 v) +{ + return (v & 0x1U) << 19U; +} +static inline u32 pwr_falcon_irqdest_target_halt_f(u32 v) +{ + return (v & 0x1U) << 20U; +} +static inline u32 pwr_falcon_irqdest_target_exterr_f(u32 v) +{ + return (v & 0x1U) << 21U; +} +static inline u32 pwr_falcon_irqdest_target_swgen0_f(u32 v) +{ + return (v & 0x1U) << 22U; +} +static inline u32 pwr_falcon_irqdest_target_swgen1_f(u32 v) +{ + return (v & 0x1U) << 23U; +} +static inline u32 pwr_falcon_irqdest_target_ext_f(u32 v) +{ + return (v & 0xffU) << 24U; +} +static inline u32 
pwr_falcon_irqdest_target_ext_ctxe_f(u32 v) +{ + return (v & 0x1U) << 24U; +} +static inline u32 pwr_falcon_irqdest_target_ext_limitv_f(u32 v) +{ + return (v & 0x1U) << 25U; +} +static inline u32 pwr_falcon_irqdest_target_ext_second_f(u32 v) +{ + return (v & 0x1U) << 27U; +} +static inline u32 pwr_falcon_irqdest_target_ext_therm_f(u32 v) +{ + return (v & 0x1U) << 28U; +} +static inline u32 pwr_falcon_irqdest_target_ext_miscio_f(u32 v) +{ + return (v & 0x1U) << 29U; +} +static inline u32 pwr_falcon_irqdest_target_ext_rttimer_f(u32 v) +{ + return (v & 0x1U) << 30U; +} +static inline u32 pwr_falcon_curctx_r(void) +{ + return 0x0010a050U; +} +static inline u32 pwr_falcon_nxtctx_r(void) +{ + return 0x0010a054U; +} +static inline u32 pwr_falcon_mailbox0_r(void) +{ + return 0x0010a040U; +} +static inline u32 pwr_falcon_mailbox1_r(void) +{ + return 0x0010a044U; +} +static inline u32 pwr_falcon_itfen_r(void) +{ + return 0x0010a048U; +} +static inline u32 pwr_falcon_itfen_ctxen_enable_f(void) +{ + return 0x1U; +} +static inline u32 pwr_falcon_idlestate_r(void) +{ + return 0x0010a04cU; +} +static inline u32 pwr_falcon_idlestate_falcon_busy_v(u32 r) +{ + return (r >> 0U) & 0x1U; +} +static inline u32 pwr_falcon_idlestate_ext_busy_v(u32 r) +{ + return (r >> 1U) & 0x7fffU; +} +static inline u32 pwr_falcon_os_r(void) +{ + return 0x0010a080U; +} +static inline u32 pwr_falcon_engctl_r(void) +{ + return 0x0010a0a4U; +} +static inline u32 pwr_falcon_cpuctl_r(void) +{ + return 0x0010a100U; +} +static inline u32 pwr_falcon_cpuctl_startcpu_f(u32 v) +{ + return (v & 0x1U) << 1U; +} +static inline u32 pwr_falcon_cpuctl_halt_intr_f(u32 v) +{ + return (v & 0x1U) << 4U; +} +static inline u32 pwr_falcon_cpuctl_halt_intr_m(void) +{ + return 0x1U << 4U; +} +static inline u32 pwr_falcon_cpuctl_halt_intr_v(u32 r) +{ + return (r >> 4U) & 0x1U; +} +static inline u32 pwr_falcon_cpuctl_cpuctl_alias_en_f(u32 v) +{ + return (v & 0x1U) << 6U; +} +static inline u32 
pwr_falcon_cpuctl_cpuctl_alias_en_m(void) +{ + return 0x1U << 6U; +} +static inline u32 pwr_falcon_cpuctl_cpuctl_alias_en_v(u32 r) +{ + return (r >> 6U) & 0x1U; +} +static inline u32 pwr_falcon_cpuctl_alias_r(void) +{ + return 0x0010a130U; +} +static inline u32 pwr_falcon_cpuctl_alias_startcpu_f(u32 v) +{ + return (v & 0x1U) << 1U; +} +static inline u32 pwr_pmu_scpctl_stat_r(void) +{ + return 0x0010ac08U; +} +static inline u32 pwr_pmu_scpctl_stat_debug_mode_f(u32 v) +{ + return (v & 0x1U) << 20U; +} +static inline u32 pwr_pmu_scpctl_stat_debug_mode_m(void) +{ + return 0x1U << 20U; +} +static inline u32 pwr_pmu_scpctl_stat_debug_mode_v(u32 r) +{ + return (r >> 20U) & 0x1U; +} +static inline u32 pwr_falcon_imemc_r(u32 i) +{ + return 0x0010a180U + i*16U; +} +static inline u32 pwr_falcon_imemc_offs_f(u32 v) +{ + return (v & 0x3fU) << 2U; +} +static inline u32 pwr_falcon_imemc_blk_f(u32 v) +{ + return (v & 0xffU) << 8U; +} +static inline u32 pwr_falcon_imemc_aincw_f(u32 v) +{ + return (v & 0x1U) << 24U; +} +static inline u32 pwr_falcon_imemd_r(u32 i) +{ + return 0x0010a184U + i*16U; +} +static inline u32 pwr_falcon_imemt_r(u32 i) +{ + return 0x0010a188U + i*16U; +} +static inline u32 pwr_falcon_sctl_r(void) +{ + return 0x0010a240U; +} +static inline u32 pwr_falcon_mmu_phys_sec_r(void) +{ + return 0x00100ce4U; +} +static inline u32 pwr_falcon_bootvec_r(void) +{ + return 0x0010a104U; +} +static inline u32 pwr_falcon_bootvec_vec_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 pwr_falcon_dmactl_r(void) +{ + return 0x0010a10cU; +} +static inline u32 pwr_falcon_dmactl_dmem_scrubbing_m(void) +{ + return 0x1U << 1U; +} +static inline u32 pwr_falcon_dmactl_imem_scrubbing_m(void) +{ + return 0x1U << 2U; +} +static inline u32 pwr_falcon_hwcfg_r(void) +{ + return 0x0010a108U; +} +static inline u32 pwr_falcon_hwcfg_imem_size_v(u32 r) +{ + return (r >> 0U) & 0x1ffU; +} +static inline u32 pwr_falcon_hwcfg_dmem_size_v(u32 r) +{ + return (r >> 9U) & 0x1ffU; +} 
+static inline u32 pwr_falcon_dmatrfbase_r(void) +{ + return 0x0010a110U; +} +static inline u32 pwr_falcon_dmatrfbase1_r(void) +{ + return 0x0010a128U; +} +static inline u32 pwr_falcon_dmatrfmoffs_r(void) +{ + return 0x0010a114U; +} +static inline u32 pwr_falcon_dmatrfcmd_r(void) +{ + return 0x0010a118U; +} +static inline u32 pwr_falcon_dmatrfcmd_imem_f(u32 v) +{ + return (v & 0x1U) << 4U; +} +static inline u32 pwr_falcon_dmatrfcmd_write_f(u32 v) +{ + return (v & 0x1U) << 5U; +} +static inline u32 pwr_falcon_dmatrfcmd_size_f(u32 v) +{ + return (v & 0x7U) << 8U; +} +static inline u32 pwr_falcon_dmatrfcmd_ctxdma_f(u32 v) +{ + return (v & 0x7U) << 12U; +} +static inline u32 pwr_falcon_dmatrffboffs_r(void) +{ + return 0x0010a11cU; +} +static inline u32 pwr_falcon_exterraddr_r(void) +{ + return 0x0010a168U; +} +static inline u32 pwr_falcon_exterrstat_r(void) +{ + return 0x0010a16cU; +} +static inline u32 pwr_falcon_exterrstat_valid_m(void) +{ + return 0x1U << 31U; +} +static inline u32 pwr_falcon_exterrstat_valid_v(u32 r) +{ + return (r >> 31U) & 0x1U; +} +static inline u32 pwr_falcon_exterrstat_valid_true_v(void) +{ + return 0x00000001U; +} +static inline u32 pwr_pmu_falcon_icd_cmd_r(void) +{ + return 0x0010a200U; +} +static inline u32 pwr_pmu_falcon_icd_cmd_opc_s(void) +{ + return 4U; +} +static inline u32 pwr_pmu_falcon_icd_cmd_opc_f(u32 v) +{ + return (v & 0xfU) << 0U; +} +static inline u32 pwr_pmu_falcon_icd_cmd_opc_m(void) +{ + return 0xfU << 0U; +} +static inline u32 pwr_pmu_falcon_icd_cmd_opc_v(u32 r) +{ + return (r >> 0U) & 0xfU; +} +static inline u32 pwr_pmu_falcon_icd_cmd_opc_rreg_f(void) +{ + return 0x8U; +} +static inline u32 pwr_pmu_falcon_icd_cmd_opc_rstat_f(void) +{ + return 0xeU; +} +static inline u32 pwr_pmu_falcon_icd_cmd_idx_f(u32 v) +{ + return (v & 0x1fU) << 8U; +} +static inline u32 pwr_pmu_falcon_icd_rdata_r(void) +{ + return 0x0010a20cU; +} +static inline u32 pwr_falcon_dmemc_r(u32 i) +{ + return 0x0010a1c0U + i*8U; +} +static inline u32 
pwr_falcon_dmemc_offs_f(u32 v) +{ + return (v & 0x3fU) << 2U; +} +static inline u32 pwr_falcon_dmemc_offs_m(void) +{ + return 0x3fU << 2U; +} +static inline u32 pwr_falcon_dmemc_blk_f(u32 v) +{ + return (v & 0xffU) << 8U; +} +static inline u32 pwr_falcon_dmemc_blk_m(void) +{ + return 0xffU << 8U; +} +static inline u32 pwr_falcon_dmemc_aincw_f(u32 v) +{ + return (v & 0x1U) << 24U; +} +static inline u32 pwr_falcon_dmemc_aincr_f(u32 v) +{ + return (v & 0x1U) << 25U; +} +static inline u32 pwr_falcon_dmemd_r(u32 i) +{ + return 0x0010a1c4U + i*8U; +} +static inline u32 pwr_pmu_new_instblk_r(void) +{ + return 0x0010a480U; +} +static inline u32 pwr_pmu_new_instblk_ptr_f(u32 v) +{ + return (v & 0xfffffffU) << 0U; +} +static inline u32 pwr_pmu_new_instblk_target_fb_f(void) +{ + return 0x0U; +} +static inline u32 pwr_pmu_new_instblk_target_sys_coh_f(void) +{ + return 0x20000000U; +} +static inline u32 pwr_pmu_new_instblk_target_sys_ncoh_f(void) +{ + return 0x30000000U; +} +static inline u32 pwr_pmu_new_instblk_valid_f(u32 v) +{ + return (v & 0x1U) << 30U; +} +static inline u32 pwr_pmu_mutex_id_r(void) +{ + return 0x0010a488U; +} +static inline u32 pwr_pmu_mutex_id_value_v(u32 r) +{ + return (r >> 0U) & 0xffU; +} +static inline u32 pwr_pmu_mutex_id_value_init_v(void) +{ + return 0x00000000U; +} +static inline u32 pwr_pmu_mutex_id_value_not_avail_v(void) +{ + return 0x000000ffU; +} +static inline u32 pwr_pmu_mutex_id_release_r(void) +{ + return 0x0010a48cU; +} +static inline u32 pwr_pmu_mutex_id_release_value_f(u32 v) +{ + return (v & 0xffU) << 0U; +} +static inline u32 pwr_pmu_mutex_id_release_value_m(void) +{ + return 0xffU << 0U; +} +static inline u32 pwr_pmu_mutex_id_release_value_init_v(void) +{ + return 0x00000000U; +} +static inline u32 pwr_pmu_mutex_id_release_value_init_f(void) +{ + return 0x0U; +} +static inline u32 pwr_pmu_mutex_r(u32 i) +{ + return 0x0010a580U + i*4U; +} +static inline u32 pwr_pmu_mutex__size_1_v(void) +{ + return 0x00000010U; +} +static inline u32 
pwr_pmu_mutex_value_f(u32 v) +{ + return (v & 0xffU) << 0U; +} +static inline u32 pwr_pmu_mutex_value_v(u32 r) +{ + return (r >> 0U) & 0xffU; +} +static inline u32 pwr_pmu_mutex_value_initial_lock_f(void) +{ + return 0x0U; +} +static inline u32 pwr_pmu_queue_head_r(u32 i) +{ + return 0x0010a800U + i*4U; +} +static inline u32 pwr_pmu_queue_head__size_1_v(void) +{ + return 0x00000008U; +} +static inline u32 pwr_pmu_queue_head_address_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 pwr_pmu_queue_head_address_v(u32 r) +{ + return (r >> 0U) & 0xffffffffU; +} +static inline u32 pwr_pmu_queue_tail_r(u32 i) +{ + return 0x0010a820U + i*4U; +} +static inline u32 pwr_pmu_queue_tail__size_1_v(void) +{ + return 0x00000008U; +} +static inline u32 pwr_pmu_queue_tail_address_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 pwr_pmu_queue_tail_address_v(u32 r) +{ + return (r >> 0U) & 0xffffffffU; +} +static inline u32 pwr_pmu_msgq_head_r(void) +{ + return 0x0010a4c8U; +} +static inline u32 pwr_pmu_msgq_head_val_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 pwr_pmu_msgq_head_val_v(u32 r) +{ + return (r >> 0U) & 0xffffffffU; +} +static inline u32 pwr_pmu_msgq_tail_r(void) +{ + return 0x0010a4ccU; +} +static inline u32 pwr_pmu_msgq_tail_val_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 pwr_pmu_msgq_tail_val_v(u32 r) +{ + return (r >> 0U) & 0xffffffffU; +} +static inline u32 pwr_pmu_idle_mask_r(u32 i) +{ + return 0x0010a504U + i*16U; +} +static inline u32 pwr_pmu_idle_mask_gr_enabled_f(void) +{ + return 0x1U; +} +static inline u32 pwr_pmu_idle_mask_ce_2_enabled_f(void) +{ + return 0x200000U; +} +static inline u32 pwr_pmu_idle_count_r(u32 i) +{ + return 0x0010a508U + i*16U; +} +static inline u32 pwr_pmu_idle_count_value_f(u32 v) +{ + return (v & 0x7fffffffU) << 0U; +} +static inline u32 pwr_pmu_idle_count_value_v(u32 r) +{ + return (r >> 0U) & 0x7fffffffU; +} +static inline u32 
pwr_pmu_idle_count_reset_f(u32 v) +{ + return (v & 0x1U) << 31U; +} +static inline u32 pwr_pmu_idle_ctrl_r(u32 i) +{ + return 0x0010a50cU + i*16U; +} +static inline u32 pwr_pmu_idle_ctrl_value_m(void) +{ + return 0x3U << 0U; +} +static inline u32 pwr_pmu_idle_ctrl_value_busy_f(void) +{ + return 0x2U; +} +static inline u32 pwr_pmu_idle_ctrl_value_always_f(void) +{ + return 0x3U; +} +static inline u32 pwr_pmu_idle_ctrl_filter_m(void) +{ + return 0x1U << 2U; +} +static inline u32 pwr_pmu_idle_ctrl_filter_disabled_f(void) +{ + return 0x0U; +} +static inline u32 pwr_pmu_idle_mask_supp_r(u32 i) +{ + return 0x0010a9f0U + i*8U; +} +static inline u32 pwr_pmu_idle_mask_1_supp_r(u32 i) +{ + return 0x0010a9f4U + i*8U; +} +static inline u32 pwr_pmu_idle_ctrl_supp_r(u32 i) +{ + return 0x0010aa30U + i*8U; +} +static inline u32 pwr_pmu_debug_r(u32 i) +{ + return 0x0010a5c0U + i*4U; +} +static inline u32 pwr_pmu_debug__size_1_v(void) +{ + return 0x00000004U; +} +static inline u32 pwr_pmu_mailbox_r(u32 i) +{ + return 0x0010a450U + i*4U; +} +static inline u32 pwr_pmu_mailbox__size_1_v(void) +{ + return 0x0000000cU; +} +static inline u32 pwr_pmu_bar0_addr_r(void) +{ + return 0x0010a7a0U; +} +static inline u32 pwr_pmu_bar0_data_r(void) +{ + return 0x0010a7a4U; +} +static inline u32 pwr_pmu_bar0_ctl_r(void) +{ + return 0x0010a7acU; +} +static inline u32 pwr_pmu_bar0_timeout_r(void) +{ + return 0x0010a7a8U; +} +static inline u32 pwr_pmu_bar0_fecs_error_r(void) +{ + return 0x0010a988U; +} +static inline u32 pwr_pmu_bar0_error_status_r(void) +{ + return 0x0010a7b0U; +} +static inline u32 pwr_pmu_pg_idlefilth_r(u32 i) +{ + return 0x0010a6c0U + i*4U; +} +static inline u32 pwr_pmu_pg_ppuidlefilth_r(u32 i) +{ + return 0x0010a6e8U + i*4U; +} +static inline u32 pwr_pmu_pg_idle_cnt_r(u32 i) +{ + return 0x0010a710U + i*4U; +} +static inline u32 pwr_pmu_pg_intren_r(u32 i) +{ + return 0x0010a760U + i*4U; +} +static inline u32 pwr_fbif_transcfg_r(u32 i) +{ + return 0x0010ae00U + i*4U; +} +static 
inline u32 pwr_fbif_transcfg_target_local_fb_f(void) +{ + return 0x0U; +} +static inline u32 pwr_fbif_transcfg_target_coherent_sysmem_f(void) +{ + return 0x1U; +} +static inline u32 pwr_fbif_transcfg_target_noncoherent_sysmem_f(void) +{ + return 0x2U; +} +static inline u32 pwr_fbif_transcfg_mem_type_s(void) +{ + return 1U; +} +static inline u32 pwr_fbif_transcfg_mem_type_f(u32 v) +{ + return (v & 0x1U) << 2U; +} +static inline u32 pwr_fbif_transcfg_mem_type_m(void) +{ + return 0x1U << 2U; +} +static inline u32 pwr_fbif_transcfg_mem_type_v(u32 r) +{ + return (r >> 2U) & 0x1U; +} +static inline u32 pwr_fbif_transcfg_mem_type_virtual_f(void) +{ + return 0x0U; +} +static inline u32 pwr_fbif_transcfg_mem_type_physical_f(void) +{ + return 0x4U; +} +#endif diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_ram_gv100.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_ram_gv100.h new file mode 100644 index 000000000..6b3e8aa6a --- /dev/null +++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_ram_gv100.h @@ -0,0 +1,775 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/* + * Function naming determines intended use: + * + * _r(void) : Returns the offset for register . + * + * _o(void) : Returns the offset for element . + * + * _w(void) : Returns the word offset for word (4 byte) element . + * + * __s(void) : Returns size of field of register in bits. + * + * __f(u32 v) : Returns a value based on 'v' which has been shifted + * and masked to place it at field of register . This value + * can be |'d with others to produce a full register value for + * register . + * + * __m(void) : Returns a mask for field of register . This + * value can be ~'d and then &'d to clear the value of field for + * register . + * + * ___f(void) : Returns the constant value after being shifted + * to place it at field of register . This value can be |'d + * with others to produce a full register value for . + * + * __v(u32 r) : Returns the value of field from a full register + * value 'r' after being shifted to place its LSB at bit 0. + * This value is suitable for direct comparison with other unshifted + * values appropriate for use in field of register . + * + * ___v(void) : Returns the constant value for defined for + * field of register . This value is suitable for direct + * comparison with unshifted values appropriate for use in field + * of register . 
+ */ +#ifndef _hw_ram_gv100_h_ +#define _hw_ram_gv100_h_ + +static inline u32 ram_in_ramfc_s(void) +{ + return 4096U; +} +static inline u32 ram_in_ramfc_w(void) +{ + return 0U; +} +static inline u32 ram_in_page_dir_base_target_f(u32 v) +{ + return (v & 0x3U) << 0U; +} +static inline u32 ram_in_page_dir_base_target_w(void) +{ + return 128U; +} +static inline u32 ram_in_page_dir_base_target_vid_mem_f(void) +{ + return 0x0U; +} +static inline u32 ram_in_page_dir_base_target_sys_mem_coh_f(void) +{ + return 0x2U; +} +static inline u32 ram_in_page_dir_base_target_sys_mem_ncoh_f(void) +{ + return 0x3U; +} +static inline u32 ram_in_page_dir_base_vol_w(void) +{ + return 128U; +} +static inline u32 ram_in_page_dir_base_vol_true_f(void) +{ + return 0x4U; +} +static inline u32 ram_in_page_dir_base_vol_false_f(void) +{ + return 0x0U; +} +static inline u32 ram_in_page_dir_base_fault_replay_tex_f(u32 v) +{ + return (v & 0x1U) << 4U; +} +static inline u32 ram_in_page_dir_base_fault_replay_tex_m(void) +{ + return 0x1U << 4U; +} +static inline u32 ram_in_page_dir_base_fault_replay_tex_w(void) +{ + return 128U; +} +static inline u32 ram_in_page_dir_base_fault_replay_tex_true_f(void) +{ + return 0x10U; +} +static inline u32 ram_in_page_dir_base_fault_replay_gcc_f(u32 v) +{ + return (v & 0x1U) << 5U; +} +static inline u32 ram_in_page_dir_base_fault_replay_gcc_m(void) +{ + return 0x1U << 5U; +} +static inline u32 ram_in_page_dir_base_fault_replay_gcc_w(void) +{ + return 128U; +} +static inline u32 ram_in_page_dir_base_fault_replay_gcc_true_f(void) +{ + return 0x20U; +} +static inline u32 ram_in_big_page_size_f(u32 v) +{ + return (v & 0x1U) << 11U; +} +static inline u32 ram_in_big_page_size_m(void) +{ + return 0x1U << 11U; +} +static inline u32 ram_in_big_page_size_w(void) +{ + return 128U; +} +static inline u32 ram_in_big_page_size_128kb_f(void) +{ + return 0x0U; +} +static inline u32 ram_in_big_page_size_64kb_f(void) +{ + return 0x800U; +} +static inline u32 
ram_in_page_dir_base_lo_f(u32 v) +{ + return (v & 0xfffffU) << 12U; +} +static inline u32 ram_in_page_dir_base_lo_w(void) +{ + return 128U; +} +static inline u32 ram_in_page_dir_base_hi_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 ram_in_page_dir_base_hi_w(void) +{ + return 129U; +} +static inline u32 ram_in_engine_cs_w(void) +{ + return 132U; +} +static inline u32 ram_in_engine_cs_wfi_v(void) +{ + return 0x00000000U; +} +static inline u32 ram_in_engine_cs_wfi_f(void) +{ + return 0x0U; +} +static inline u32 ram_in_engine_cs_fg_v(void) +{ + return 0x00000001U; +} +static inline u32 ram_in_engine_cs_fg_f(void) +{ + return 0x8U; +} +static inline u32 ram_in_engine_wfi_mode_f(u32 v) +{ + return (v & 0x1U) << 2U; +} +static inline u32 ram_in_engine_wfi_mode_w(void) +{ + return 132U; +} +static inline u32 ram_in_engine_wfi_mode_physical_v(void) +{ + return 0x00000000U; +} +static inline u32 ram_in_engine_wfi_mode_virtual_v(void) +{ + return 0x00000001U; +} +static inline u32 ram_in_engine_wfi_target_f(u32 v) +{ + return (v & 0x3U) << 0U; +} +static inline u32 ram_in_engine_wfi_target_w(void) +{ + return 132U; +} +static inline u32 ram_in_engine_wfi_target_sys_mem_coh_v(void) +{ + return 0x00000002U; +} +static inline u32 ram_in_engine_wfi_target_sys_mem_ncoh_v(void) +{ + return 0x00000003U; +} +static inline u32 ram_in_engine_wfi_target_local_mem_v(void) +{ + return 0x00000000U; +} +static inline u32 ram_in_engine_wfi_ptr_lo_f(u32 v) +{ + return (v & 0xfffffU) << 12U; +} +static inline u32 ram_in_engine_wfi_ptr_lo_w(void) +{ + return 132U; +} +static inline u32 ram_in_engine_wfi_ptr_hi_f(u32 v) +{ + return (v & 0xffU) << 0U; +} +static inline u32 ram_in_engine_wfi_ptr_hi_w(void) +{ + return 133U; +} +static inline u32 ram_in_engine_wfi_veid_f(u32 v) +{ + return (v & 0x3fU) << 0U; +} +static inline u32 ram_in_engine_wfi_veid_w(void) +{ + return 134U; +} +static inline u32 ram_in_eng_method_buffer_addr_lo_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; 
+} +static inline u32 ram_in_eng_method_buffer_addr_lo_w(void) +{ + return 136U; +} +static inline u32 ram_in_eng_method_buffer_addr_hi_f(u32 v) +{ + return (v & 0x1ffffU) << 0U; +} +static inline u32 ram_in_eng_method_buffer_addr_hi_w(void) +{ + return 137U; +} +static inline u32 ram_in_sc_page_dir_base_target_f(u32 v, u32 i) +{ + return (v & 0x3U) << (0U + i*0U); +} +static inline u32 ram_in_sc_page_dir_base_target__size_1_v(void) +{ + return 0x00000040U; +} +static inline u32 ram_in_sc_page_dir_base_target_vid_mem_v(void) +{ + return 0x00000000U; +} +static inline u32 ram_in_sc_page_dir_base_target_invalid_v(void) +{ + return 0x00000001U; +} +static inline u32 ram_in_sc_page_dir_base_target_sys_mem_coh_v(void) +{ + return 0x00000002U; +} +static inline u32 ram_in_sc_page_dir_base_target_sys_mem_ncoh_v(void) +{ + return 0x00000003U; +} +static inline u32 ram_in_sc_page_dir_base_vol_f(u32 v, u32 i) +{ + return (v & 0x1U) << (2U + i*0U); +} +static inline u32 ram_in_sc_page_dir_base_vol__size_1_v(void) +{ + return 0x00000040U; +} +static inline u32 ram_in_sc_page_dir_base_vol_true_v(void) +{ + return 0x00000001U; +} +static inline u32 ram_in_sc_page_dir_base_vol_false_v(void) +{ + return 0x00000000U; +} +static inline u32 ram_in_sc_page_dir_base_fault_replay_tex_f(u32 v, u32 i) +{ + return (v & 0x1U) << (4U + i*0U); +} +static inline u32 ram_in_sc_page_dir_base_fault_replay_tex__size_1_v(void) +{ + return 0x00000040U; +} +static inline u32 ram_in_sc_page_dir_base_fault_replay_tex_enabled_v(void) +{ + return 0x00000001U; +} +static inline u32 ram_in_sc_page_dir_base_fault_replay_tex_disabled_v(void) +{ + return 0x00000000U; +} +static inline u32 ram_in_sc_page_dir_base_fault_replay_gcc_f(u32 v, u32 i) +{ + return (v & 0x1U) << (5U + i*0U); +} +static inline u32 ram_in_sc_page_dir_base_fault_replay_gcc__size_1_v(void) +{ + return 0x00000040U; +} +static inline u32 ram_in_sc_page_dir_base_fault_replay_gcc_enabled_v(void) +{ + return 0x00000001U; +} +static inline u32 
ram_in_sc_page_dir_base_fault_replay_gcc_disabled_v(void) +{ + return 0x00000000U; +} +static inline u32 ram_in_sc_use_ver2_pt_format_f(u32 v, u32 i) +{ + return (v & 0x1U) << (10U + i*0U); +} +static inline u32 ram_in_sc_use_ver2_pt_format__size_1_v(void) +{ + return 0x00000040U; +} +static inline u32 ram_in_sc_use_ver2_pt_format_false_v(void) +{ + return 0x00000000U; +} +static inline u32 ram_in_sc_use_ver2_pt_format_true_v(void) +{ + return 0x00000001U; +} +static inline u32 ram_in_sc_big_page_size_f(u32 v, u32 i) +{ + return (v & 0x1U) << (11U + i*0U); +} +static inline u32 ram_in_sc_big_page_size__size_1_v(void) +{ + return 0x00000040U; +} +static inline u32 ram_in_sc_big_page_size_64kb_v(void) +{ + return 0x00000001U; +} +static inline u32 ram_in_sc_page_dir_base_lo_f(u32 v, u32 i) +{ + return (v & 0xfffffU) << (12U + i*0U); +} +static inline u32 ram_in_sc_page_dir_base_lo__size_1_v(void) +{ + return 0x00000040U; +} +static inline u32 ram_in_sc_page_dir_base_hi_f(u32 v, u32 i) +{ + return (v & 0xffffffffU) << (0U + i*0U); +} +static inline u32 ram_in_sc_page_dir_base_hi__size_1_v(void) +{ + return 0x00000040U; +} +static inline u32 ram_in_sc_page_dir_base_target_0_f(u32 v) +{ + return (v & 0x3U) << 0U; +} +static inline u32 ram_in_sc_page_dir_base_target_0_w(void) +{ + return 168U; +} +static inline u32 ram_in_sc_page_dir_base_vol_0_f(u32 v) +{ + return (v & 0x1U) << 2U; +} +static inline u32 ram_in_sc_page_dir_base_vol_0_w(void) +{ + return 168U; +} +static inline u32 ram_in_sc_page_dir_base_fault_replay_tex_0_f(u32 v) +{ + return (v & 0x1U) << 4U; +} +static inline u32 ram_in_sc_page_dir_base_fault_replay_tex_0_w(void) +{ + return 168U; +} +static inline u32 ram_in_sc_page_dir_base_fault_replay_gcc_0_f(u32 v) +{ + return (v & 0x1U) << 5U; +} +static inline u32 ram_in_sc_page_dir_base_fault_replay_gcc_0_w(void) +{ + return 168U; +} +static inline u32 ram_in_sc_use_ver2_pt_format_0_f(u32 v) +{ + return (v & 0x1U) << 10U; +} +static inline u32 
ram_in_sc_use_ver2_pt_format_0_w(void) +{ + return 168U; +} +static inline u32 ram_in_sc_big_page_size_0_f(u32 v) +{ + return (v & 0x1U) << 11U; +} +static inline u32 ram_in_sc_big_page_size_0_w(void) +{ + return 168U; +} +static inline u32 ram_in_sc_page_dir_base_lo_0_f(u32 v) +{ + return (v & 0xfffffU) << 12U; +} +static inline u32 ram_in_sc_page_dir_base_lo_0_w(void) +{ + return 168U; +} +static inline u32 ram_in_sc_page_dir_base_hi_0_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 ram_in_sc_page_dir_base_hi_0_w(void) +{ + return 169U; +} +static inline u32 ram_in_base_shift_v(void) +{ + return 0x0000000cU; +} +static inline u32 ram_in_alloc_size_v(void) +{ + return 0x00001000U; +} +static inline u32 ram_fc_size_val_v(void) +{ + return 0x00000200U; +} +static inline u32 ram_fc_gp_put_w(void) +{ + return 0U; +} +static inline u32 ram_fc_userd_w(void) +{ + return 2U; +} +static inline u32 ram_fc_userd_hi_w(void) +{ + return 3U; +} +static inline u32 ram_fc_signature_w(void) +{ + return 4U; +} +static inline u32 ram_fc_gp_get_w(void) +{ + return 5U; +} +static inline u32 ram_fc_pb_get_w(void) +{ + return 6U; +} +static inline u32 ram_fc_pb_get_hi_w(void) +{ + return 7U; +} +static inline u32 ram_fc_pb_top_level_get_w(void) +{ + return 8U; +} +static inline u32 ram_fc_pb_top_level_get_hi_w(void) +{ + return 9U; +} +static inline u32 ram_fc_acquire_w(void) +{ + return 12U; +} +static inline u32 ram_fc_sem_addr_hi_w(void) +{ + return 14U; +} +static inline u32 ram_fc_sem_addr_lo_w(void) +{ + return 15U; +} +static inline u32 ram_fc_sem_payload_lo_w(void) +{ + return 16U; +} +static inline u32 ram_fc_sem_payload_hi_w(void) +{ + return 39U; +} +static inline u32 ram_fc_sem_execute_w(void) +{ + return 17U; +} +static inline u32 ram_fc_gp_base_w(void) +{ + return 18U; +} +static inline u32 ram_fc_gp_base_hi_w(void) +{ + return 19U; +} +static inline u32 ram_fc_gp_fetch_w(void) +{ + return 20U; +} +static inline u32 ram_fc_pb_fetch_w(void) +{ + return 
21U; +} +static inline u32 ram_fc_pb_fetch_hi_w(void) +{ + return 22U; +} +static inline u32 ram_fc_pb_put_w(void) +{ + return 23U; +} +static inline u32 ram_fc_pb_put_hi_w(void) +{ + return 24U; +} +static inline u32 ram_fc_pb_header_w(void) +{ + return 33U; +} +static inline u32 ram_fc_pb_count_w(void) +{ + return 34U; +} +static inline u32 ram_fc_subdevice_w(void) +{ + return 37U; +} +static inline u32 ram_fc_target_w(void) +{ + return 43U; +} +static inline u32 ram_fc_hce_ctrl_w(void) +{ + return 57U; +} +static inline u32 ram_fc_chid_w(void) +{ + return 58U; +} +static inline u32 ram_fc_chid_id_f(u32 v) +{ + return (v & 0xfffU) << 0U; +} +static inline u32 ram_fc_chid_id_w(void) +{ + return 0U; +} +static inline u32 ram_fc_config_w(void) +{ + return 61U; +} +static inline u32 ram_fc_runlist_timeslice_w(void) +{ + return 62U; +} +static inline u32 ram_fc_set_channel_info_w(void) +{ + return 63U; +} +static inline u32 ram_userd_base_shift_v(void) +{ + return 0x00000009U; +} +static inline u32 ram_userd_chan_size_v(void) +{ + return 0x00000200U; +} +static inline u32 ram_userd_put_w(void) +{ + return 16U; +} +static inline u32 ram_userd_get_w(void) +{ + return 17U; +} +static inline u32 ram_userd_ref_w(void) +{ + return 18U; +} +static inline u32 ram_userd_put_hi_w(void) +{ + return 19U; +} +static inline u32 ram_userd_ref_threshold_w(void) +{ + return 20U; +} +static inline u32 ram_userd_top_level_get_w(void) +{ + return 22U; +} +static inline u32 ram_userd_top_level_get_hi_w(void) +{ + return 23U; +} +static inline u32 ram_userd_get_hi_w(void) +{ + return 24U; +} +static inline u32 ram_userd_gp_get_w(void) +{ + return 34U; +} +static inline u32 ram_userd_gp_put_w(void) +{ + return 35U; +} +static inline u32 ram_userd_gp_top_level_get_w(void) +{ + return 22U; +} +static inline u32 ram_userd_gp_top_level_get_hi_w(void) +{ + return 23U; +} +static inline u32 ram_rl_entry_size_v(void) +{ + return 0x00000010U; +} +static inline u32 ram_rl_entry_type_f(u32 v) +{ + 
return (v & 0x1U) << 0U; +} +static inline u32 ram_rl_entry_type_channel_v(void) +{ + return 0x00000000U; +} +static inline u32 ram_rl_entry_type_tsg_v(void) +{ + return 0x00000001U; +} +static inline u32 ram_rl_entry_id_f(u32 v) +{ + return (v & 0xfffU) << 0U; +} +static inline u32 ram_rl_entry_chan_runqueue_selector_f(u32 v) +{ + return (v & 0x1U) << 1U; +} +static inline u32 ram_rl_entry_chan_inst_target_f(u32 v) +{ + return (v & 0x3U) << 4U; +} +static inline u32 ram_rl_entry_chan_inst_target_sys_mem_ncoh_v(void) +{ + return 0x00000003U; +} +static inline u32 ram_rl_entry_chan_inst_target_sys_mem_coh_v(void) +{ + return 0x00000002U; +} +static inline u32 ram_rl_entry_chan_inst_target_vid_mem_v(void) +{ + return 0x00000000U; +} +static inline u32 ram_rl_entry_chan_userd_target_f(u32 v) +{ + return (v & 0x3U) << 6U; +} +static inline u32 ram_rl_entry_chan_userd_target_vid_mem_v(void) +{ + return 0x00000000U; +} +static inline u32 ram_rl_entry_chan_userd_target_vid_mem_nvlink_coh_v(void) +{ + return 0x00000001U; +} +static inline u32 ram_rl_entry_chan_userd_target_sys_mem_coh_v(void) +{ + return 0x00000002U; +} +static inline u32 ram_rl_entry_chan_userd_target_sys_mem_ncoh_v(void) +{ + return 0x00000003U; +} +static inline u32 ram_rl_entry_chan_userd_ptr_lo_f(u32 v) +{ + return (v & 0xffffffU) << 8U; +} +static inline u32 ram_rl_entry_chan_userd_ptr_hi_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 ram_rl_entry_chid_f(u32 v) +{ + return (v & 0xfffU) << 0U; +} +static inline u32 ram_rl_entry_chan_inst_ptr_lo_f(u32 v) +{ + return (v & 0xfffffU) << 12U; +} +static inline u32 ram_rl_entry_chan_inst_ptr_hi_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 ram_rl_entry_tsg_timeslice_scale_f(u32 v) +{ + return (v & 0xfU) << 16U; +} +static inline u32 ram_rl_entry_tsg_timeslice_scale_3_v(void) +{ + return 0x00000003U; +} +static inline u32 ram_rl_entry_tsg_timeslice_timeout_f(u32 v) +{ + return (v & 0xffU) << 24U; +} +static inline 
u32 ram_rl_entry_tsg_timeslice_timeout_128_v(void) +{ + return 0x00000080U; +} +static inline u32 ram_rl_entry_tsg_timeslice_timeout_disable_v(void) +{ + return 0x00000000U; +} +static inline u32 ram_rl_entry_tsg_length_f(u32 v) +{ + return (v & 0xffU) << 0U; +} +static inline u32 ram_rl_entry_tsg_length_init_v(void) +{ + return 0x00000000U; +} +static inline u32 ram_rl_entry_tsg_length_min_v(void) +{ + return 0x00000001U; +} +static inline u32 ram_rl_entry_tsg_length_max_v(void) +{ + return 0x00000080U; +} +static inline u32 ram_rl_entry_tsg_tsgid_f(u32 v) +{ + return (v & 0xfffU) << 0U; +} +static inline u32 ram_rl_entry_chan_userd_ptr_align_shift_v(void) +{ + return 0x00000008U; +} +static inline u32 ram_rl_entry_chan_userd_align_shift_v(void) +{ + return 0x00000008U; +} +static inline u32 ram_rl_entry_chan_inst_ptr_align_shift_v(void) +{ + return 0x0000000cU; +} +#endif diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_therm_gv100.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_therm_gv100.h new file mode 100644 index 000000000..2ea71ef14 --- /dev/null +++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_therm_gv100.h @@ -0,0 +1,299 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/* + * Function naming determines intended use: + * + * _r(void) : Returns the offset for register . + * + * _o(void) : Returns the offset for element . + * + * _w(void) : Returns the word offset for word (4 byte) element . + * + * __s(void) : Returns size of field of register in bits. + * + * __f(u32 v) : Returns a value based on 'v' which has been shifted + * and masked to place it at field of register . This value + * can be |'d with others to produce a full register value for + * register . + * + * __m(void) : Returns a mask for field of register . This + * value can be ~'d and then &'d to clear the value of field for + * register . + * + * ___f(void) : Returns the constant value after being shifted + * to place it at field of register . This value can be |'d + * with others to produce a full register value for . + * + * __v(u32 r) : Returns the value of field from a full register + * value 'r' after being shifted to place its LSB at bit 0. + * This value is suitable for direct comparison with other unshifted + * values appropriate for use in field of register . + * + * ___v(void) : Returns the constant value for defined for + * field of register . This value is suitable for direct + * comparison with unshifted values appropriate for use in field + * of register . 
+ */ +#ifndef _hw_therm_gv100_h_ +#define _hw_therm_gv100_h_ + +static inline u32 therm_weight_1_r(void) +{ + return 0x00020024U; +} +static inline u32 therm_config1_r(void) +{ + return 0x00020050U; +} +static inline u32 therm_config2_r(void) +{ + return 0x00020130U; +} +static inline u32 therm_config2_slowdown_factor_extended_f(u32 v) +{ + return (v & 0x1U) << 24U; +} +static inline u32 therm_config2_grad_enable_f(u32 v) +{ + return (v & 0x1U) << 31U; +} +static inline u32 therm_gate_ctrl_r(u32 i) +{ + return 0x00020200U + i*4U; +} +static inline u32 therm_gate_ctrl_eng_clk_m(void) +{ + return 0x3U << 0U; +} +static inline u32 therm_gate_ctrl_eng_clk_run_f(void) +{ + return 0x0U; +} +static inline u32 therm_gate_ctrl_eng_clk_auto_f(void) +{ + return 0x1U; +} +static inline u32 therm_gate_ctrl_eng_clk_stop_f(void) +{ + return 0x2U; +} +static inline u32 therm_gate_ctrl_blk_clk_m(void) +{ + return 0x3U << 2U; +} +static inline u32 therm_gate_ctrl_blk_clk_run_f(void) +{ + return 0x0U; +} +static inline u32 therm_gate_ctrl_blk_clk_auto_f(void) +{ + return 0x4U; +} +static inline u32 therm_gate_ctrl_idle_holdoff_m(void) +{ + return 0x1U << 4U; +} +static inline u32 therm_gate_ctrl_idle_holdoff_off_f(void) +{ + return 0x0U; +} +static inline u32 therm_gate_ctrl_idle_holdoff_on_f(void) +{ + return 0x10U; +} +static inline u32 therm_gate_ctrl_eng_idle_filt_exp_f(u32 v) +{ + return (v & 0x1fU) << 8U; +} +static inline u32 therm_gate_ctrl_eng_idle_filt_exp_m(void) +{ + return 0x1fU << 8U; +} +static inline u32 therm_gate_ctrl_eng_idle_filt_mant_f(u32 v) +{ + return (v & 0x7U) << 13U; +} +static inline u32 therm_gate_ctrl_eng_idle_filt_mant_m(void) +{ + return 0x7U << 13U; +} +static inline u32 therm_gate_ctrl_eng_delay_before_f(u32 v) +{ + return (v & 0xfU) << 16U; +} +static inline u32 therm_gate_ctrl_eng_delay_before_m(void) +{ + return 0xfU << 16U; +} +static inline u32 therm_gate_ctrl_eng_delay_after_f(u32 v) +{ + return (v & 0xfU) << 20U; +} +static inline u32 
therm_gate_ctrl_eng_delay_after_m(void) +{ + return 0xfU << 20U; +} +static inline u32 therm_fecs_idle_filter_r(void) +{ + return 0x00020288U; +} +static inline u32 therm_fecs_idle_filter_value_m(void) +{ + return 0xffffffffU << 0U; +} +static inline u32 therm_hubmmu_idle_filter_r(void) +{ + return 0x0002028cU; +} +static inline u32 therm_hubmmu_idle_filter_value_m(void) +{ + return 0xffffffffU << 0U; +} +static inline u32 therm_clk_slowdown_r(u32 i) +{ + return 0x00020160U + i*4U; +} +static inline u32 therm_clk_slowdown_idle_factor_f(u32 v) +{ + return (v & 0x3fU) << 16U; +} +static inline u32 therm_clk_slowdown_idle_factor_m(void) +{ + return 0x3fU << 16U; +} +static inline u32 therm_clk_slowdown_idle_factor_v(u32 r) +{ + return (r >> 16U) & 0x3fU; +} +static inline u32 therm_clk_slowdown_idle_factor_disabled_f(void) +{ + return 0x0U; +} +static inline u32 therm_grad_stepping_table_r(u32 i) +{ + return 0x000202c8U + i*4U; +} +static inline u32 therm_grad_stepping_table_slowdown_factor0_f(u32 v) +{ + return (v & 0x3fU) << 0U; +} +static inline u32 therm_grad_stepping_table_slowdown_factor0_m(void) +{ + return 0x3fU << 0U; +} +static inline u32 therm_grad_stepping_table_slowdown_factor0_fpdiv_by1p5_f(void) +{ + return 0x1U; +} +static inline u32 therm_grad_stepping_table_slowdown_factor0_fpdiv_by2_f(void) +{ + return 0x2U; +} +static inline u32 therm_grad_stepping_table_slowdown_factor0_fpdiv_by4_f(void) +{ + return 0x6U; +} +static inline u32 therm_grad_stepping_table_slowdown_factor0_fpdiv_by8_f(void) +{ + return 0xeU; +} +static inline u32 therm_grad_stepping_table_slowdown_factor1_f(u32 v) +{ + return (v & 0x3fU) << 6U; +} +static inline u32 therm_grad_stepping_table_slowdown_factor1_m(void) +{ + return 0x3fU << 6U; +} +static inline u32 therm_grad_stepping_table_slowdown_factor2_f(u32 v) +{ + return (v & 0x3fU) << 12U; +} +static inline u32 therm_grad_stepping_table_slowdown_factor2_m(void) +{ + return 0x3fU << 12U; +} +static inline u32 
therm_grad_stepping_table_slowdown_factor3_f(u32 v) +{ + return (v & 0x3fU) << 18U; +} +static inline u32 therm_grad_stepping_table_slowdown_factor3_m(void) +{ + return 0x3fU << 18U; +} +static inline u32 therm_grad_stepping_table_slowdown_factor4_f(u32 v) +{ + return (v & 0x3fU) << 24U; +} +static inline u32 therm_grad_stepping_table_slowdown_factor4_m(void) +{ + return 0x3fU << 24U; +} +static inline u32 therm_grad_stepping0_r(void) +{ + return 0x000202c0U; +} +static inline u32 therm_grad_stepping0_feature_s(void) +{ + return 1U; +} +static inline u32 therm_grad_stepping0_feature_f(u32 v) +{ + return (v & 0x1U) << 0U; +} +static inline u32 therm_grad_stepping0_feature_m(void) +{ + return 0x1U << 0U; +} +static inline u32 therm_grad_stepping0_feature_v(u32 r) +{ + return (r >> 0U) & 0x1U; +} +static inline u32 therm_grad_stepping0_feature_enable_f(void) +{ + return 0x1U; +} +static inline u32 therm_grad_stepping1_r(void) +{ + return 0x000202c4U; +} +static inline u32 therm_grad_stepping1_pdiv_duration_f(u32 v) +{ + return (v & 0x1ffffU) << 0U; +} +static inline u32 therm_clk_timing_r(u32 i) +{ + return 0x000203c0U + i*4U; +} +static inline u32 therm_clk_timing_grad_slowdown_f(u32 v) +{ + return (v & 0x1U) << 16U; +} +static inline u32 therm_clk_timing_grad_slowdown_m(void) +{ + return 0x1U << 16U; +} +static inline u32 therm_clk_timing_grad_slowdown_enabled_f(void) +{ + return 0x10000U; +} +#endif diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_timer_gv100.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_timer_gv100.h new file mode 100644 index 000000000..9d76e2419 --- /dev/null +++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_timer_gv100.h @@ -0,0 +1,115 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/* + * Function naming determines intended use: + * + * _r(void) : Returns the offset for register . + * + * _o(void) : Returns the offset for element . + * + * _w(void) : Returns the word offset for word (4 byte) element . + * + * __s(void) : Returns size of field of register in bits. + * + * __f(u32 v) : Returns a value based on 'v' which has been shifted + * and masked to place it at field of register . This value + * can be |'d with others to produce a full register value for + * register . + * + * __m(void) : Returns a mask for field of register . This + * value can be ~'d and then &'d to clear the value of field for + * register . + * + * ___f(void) : Returns the constant value after being shifted + * to place it at field of register . This value can be |'d + * with others to produce a full register value for . 
+ * + * __v(u32 r) : Returns the value of field from a full register + * value 'r' after being shifted to place its LSB at bit 0. + * This value is suitable for direct comparison with other unshifted + * values appropriate for use in field of register . + * + * ___v(void) : Returns the constant value for defined for + * field of register . This value is suitable for direct + * comparison with unshifted values appropriate for use in field + * of register . + */ +#ifndef _hw_timer_gv100_h_ +#define _hw_timer_gv100_h_ + +static inline u32 timer_pri_timeout_r(void) +{ + return 0x00009080U; +} +static inline u32 timer_pri_timeout_period_f(u32 v) +{ + return (v & 0xffffffU) << 0U; +} +static inline u32 timer_pri_timeout_period_m(void) +{ + return 0xffffffU << 0U; +} +static inline u32 timer_pri_timeout_period_v(u32 r) +{ + return (r >> 0U) & 0xffffffU; +} +static inline u32 timer_pri_timeout_en_f(u32 v) +{ + return (v & 0x1U) << 31U; +} +static inline u32 timer_pri_timeout_en_m(void) +{ + return 0x1U << 31U; +} +static inline u32 timer_pri_timeout_en_v(u32 r) +{ + return (r >> 31U) & 0x1U; +} +static inline u32 timer_pri_timeout_en_en_enabled_f(void) +{ + return 0x80000000U; +} +static inline u32 timer_pri_timeout_en_en_disabled_f(void) +{ + return 0x0U; +} +static inline u32 timer_pri_timeout_save_0_r(void) +{ + return 0x00009084U; +} +static inline u32 timer_pri_timeout_save_1_r(void) +{ + return 0x00009088U; +} +static inline u32 timer_pri_timeout_fecs_errcode_r(void) +{ + return 0x0000908cU; +} +static inline u32 timer_time_0_r(void) +{ + return 0x00009400U; +} +static inline u32 timer_time_1_r(void) +{ + return 0x00009410U; +} +#endif diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_top_gv100.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_top_gv100.h new file mode 100644 index 000000000..da297b721 --- /dev/null +++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_top_gv100.h @@ -0,0 +1,235 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/* + * Function naming determines intended use: + * + * _r(void) : Returns the offset for register . + * + * _o(void) : Returns the offset for element . + * + * _w(void) : Returns the word offset for word (4 byte) element . + * + * __s(void) : Returns size of field of register in bits. + * + * __f(u32 v) : Returns a value based on 'v' which has been shifted + * and masked to place it at field of register . This value + * can be |'d with others to produce a full register value for + * register . + * + * __m(void) : Returns a mask for field of register . This + * value can be ~'d and then &'d to clear the value of field for + * register . + * + * ___f(void) : Returns the constant value after being shifted + * to place it at field of register . This value can be |'d + * with others to produce a full register value for . 
+ * + * __v(u32 r) : Returns the value of field from a full register + * value 'r' after being shifted to place its LSB at bit 0. + * This value is suitable for direct comparison with other unshifted + * values appropriate for use in field of register . + * + * ___v(void) : Returns the constant value for defined for + * field of register . This value is suitable for direct + * comparison with unshifted values appropriate for use in field + * of register . + */ +#ifndef _hw_top_gv100_h_ +#define _hw_top_gv100_h_ + +static inline u32 top_num_gpcs_r(void) +{ + return 0x00022430U; +} +static inline u32 top_num_gpcs_value_v(u32 r) +{ + return (r >> 0U) & 0x1fU; +} +static inline u32 top_tpc_per_gpc_r(void) +{ + return 0x00022434U; +} +static inline u32 top_tpc_per_gpc_value_v(u32 r) +{ + return (r >> 0U) & 0x1fU; +} +static inline u32 top_num_fbps_r(void) +{ + return 0x00022438U; +} +static inline u32 top_num_fbps_value_v(u32 r) +{ + return (r >> 0U) & 0x1fU; +} +static inline u32 top_ltc_per_fbp_r(void) +{ + return 0x00022450U; +} +static inline u32 top_ltc_per_fbp_value_v(u32 r) +{ + return (r >> 0U) & 0x1fU; +} +static inline u32 top_slices_per_ltc_r(void) +{ + return 0x0002245cU; +} +static inline u32 top_slices_per_ltc_value_v(u32 r) +{ + return (r >> 0U) & 0x1fU; +} +static inline u32 top_num_ltcs_r(void) +{ + return 0x00022454U; +} +static inline u32 top_num_ces_r(void) +{ + return 0x00022444U; +} +static inline u32 top_num_ces_value_v(u32 r) +{ + return (r >> 0U) & 0x1fU; +} +static inline u32 top_device_info_r(u32 i) +{ + return 0x00022700U + i*4U; +} +static inline u32 top_device_info__size_1_v(void) +{ + return 0x00000040U; +} +static inline u32 top_device_info_chain_v(u32 r) +{ + return (r >> 31U) & 0x1U; +} +static inline u32 top_device_info_chain_enable_v(void) +{ + return 0x00000001U; +} +static inline u32 top_device_info_engine_enum_v(u32 r) +{ + return (r >> 26U) & 0xfU; +} +static inline u32 top_device_info_runlist_enum_v(u32 r) +{ + return (r >> 21U) 
& 0xfU; +} +static inline u32 top_device_info_intr_enum_v(u32 r) +{ + return (r >> 15U) & 0x1fU; +} +static inline u32 top_device_info_reset_enum_v(u32 r) +{ + return (r >> 9U) & 0x1fU; +} +static inline u32 top_device_info_type_enum_v(u32 r) +{ + return (r >> 2U) & 0x1fffffffU; +} +static inline u32 top_device_info_type_enum_graphics_v(void) +{ + return 0x00000000U; +} +static inline u32 top_device_info_type_enum_graphics_f(void) +{ + return 0x0U; +} +static inline u32 top_device_info_type_enum_copy2_v(void) +{ + return 0x00000003U; +} +static inline u32 top_device_info_type_enum_copy2_f(void) +{ + return 0xcU; +} +static inline u32 top_device_info_type_enum_lce_v(void) +{ + return 0x00000013U; +} +static inline u32 top_device_info_type_enum_lce_f(void) +{ + return 0x4cU; +} +static inline u32 top_device_info_engine_v(u32 r) +{ + return (r >> 5U) & 0x1U; +} +static inline u32 top_device_info_runlist_v(u32 r) +{ + return (r >> 4U) & 0x1U; +} +static inline u32 top_device_info_intr_v(u32 r) +{ + return (r >> 3U) & 0x1U; +} +static inline u32 top_device_info_reset_v(u32 r) +{ + return (r >> 2U) & 0x1U; +} +static inline u32 top_device_info_entry_v(u32 r) +{ + return (r >> 0U) & 0x3U; +} +static inline u32 top_device_info_entry_not_valid_v(void) +{ + return 0x00000000U; +} +static inline u32 top_device_info_entry_enum_v(void) +{ + return 0x00000002U; +} +static inline u32 top_device_info_entry_data_v(void) +{ + return 0x00000001U; +} +static inline u32 top_device_info_data_type_v(u32 r) +{ + return (r >> 30U) & 0x1U; +} +static inline u32 top_device_info_data_type_enum2_v(void) +{ + return 0x00000000U; +} +static inline u32 top_device_info_data_inst_id_v(u32 r) +{ + return (r >> 26U) & 0xfU; +} +static inline u32 top_device_info_data_pri_base_v(u32 r) +{ + return (r >> 12U) & 0xfffU; +} +static inline u32 top_device_info_data_pri_base_align_v(void) +{ + return 0x0000000cU; +} +static inline u32 top_device_info_data_fault_id_enum_v(u32 r) +{ + return (r >> 3U) & 0x7fU; 
+} +static inline u32 top_device_info_data_fault_id_v(u32 r) +{ + return (r >> 2U) & 0x1U; +} +static inline u32 top_device_info_data_fault_id_valid_v(void) +{ + return 0x00000001U; +} +#endif diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_usermode_gv100.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_usermode_gv100.h new file mode 100644 index 000000000..7b1d861ec --- /dev/null +++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_usermode_gv100.h @@ -0,0 +1,95 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/* + * Function naming determines intended use: + * + * _r(void) : Returns the offset for register . + * + * _o(void) : Returns the offset for element . + * + * _w(void) : Returns the word offset for word (4 byte) element . + * + * __s(void) : Returns size of field of register in bits. 
+ *
+ *     <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
+ *         and masked to place it at field <y> of register <x>.  This value
+ *         can be |'d with others to produce a full register value for
+ *         register <x>.
+ *
+ *     <x>_<y>_m(void) : Returns a mask for field <y> of register <x>.  This
+ *         value can be ~'d and then &'d to clear the value of field <y> for
+ *         register <x>.
+ *
+ *     <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
+ *         to place it at field <y> of register <x>.  This value can be |'d
+ *         with others to produce a full register value for <x>.
+ *
+ *     <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
+ *         value 'r' after being shifted to place its LSB at bit 0.
+ *         This value is suitable for direct comparison with other unshifted
+ *         values appropriate for use in field <y> of register <x>.
+ *
+ *     <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
+ *         field <y> of register <x>.  This value is suitable for direct
+ *         comparison with unshifted values appropriate for use in field <y>
+ *         of register <x>.
+ */ +#ifndef _hw_usermode_gv100_h_ +#define _hw_usermode_gv100_h_ + +static inline u32 usermode_cfg0_r(void) +{ + return 0x00810000U; +} +static inline u32 usermode_cfg0_class_id_f(u32 v) +{ + return (v & 0xffffU) << 0U; +} +static inline u32 usermode_cfg0_class_id_value_v(void) +{ + return 0x0000c361U; +} +static inline u32 usermode_time_0_r(void) +{ + return 0x00810080U; +} +static inline u32 usermode_time_0_nsec_f(u32 v) +{ + return (v & 0x7ffffffU) << 5U; +} +static inline u32 usermode_time_1_r(void) +{ + return 0x00810084U; +} +static inline u32 usermode_time_1_nsec_f(u32 v) +{ + return (v & 0x1fffffffU) << 0U; +} +static inline u32 usermode_notify_channel_pending_r(void) +{ + return 0x00810090U; +} +static inline u32 usermode_notify_channel_pending_id_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +#endif diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_xp_gv100.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_xp_gv100.h new file mode 100644 index 000000000..4296e0432 --- /dev/null +++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_xp_gv100.h @@ -0,0 +1,143 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+/*
+ * Function naming determines intended use:
+ *
+ *     <x>_r(void) : Returns the offset for register <x>.
+ *
+ *     <x>_o(void) : Returns the offset for element <x>.
+ *
+ *     <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
+ *
+ *     <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
+ *
+ *     <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
+ *         and masked to place it at field <y> of register <x>.  This value
+ *         can be |'d with others to produce a full register value for
+ *         register <x>.
+ *
+ *     <x>_<y>_m(void) : Returns a mask for field <y> of register <x>.  This
+ *         value can be ~'d and then &'d to clear the value of field <y> for
+ *         register <x>.
+ *
+ *     <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
+ *         to place it at field <y> of register <x>.  This value can be |'d
+ *         with others to produce a full register value for <x>.
+ *
+ *     <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
+ *         value 'r' after being shifted to place its LSB at bit 0.
+ *         This value is suitable for direct comparison with other unshifted
+ *         values appropriate for use in field <y> of register <x>.
+ *
+ *     <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
+ *         field <y> of register <x>.  This value is suitable for direct
+ *         comparison with unshifted values appropriate for use in field <y>
+ *         of register <x>.
+ */ +#ifndef _hw_xp_gv100_h_ +#define _hw_xp_gv100_h_ + +static inline u32 xp_dl_mgr_r(u32 i) +{ + return 0x0008b8c0U + i*4U; +} +static inline u32 xp_dl_mgr_safe_timing_f(u32 v) +{ + return (v & 0x1U) << 2U; +} +static inline u32 xp_pl_link_config_r(u32 i) +{ + return 0x0008c040U + i*4U; +} +static inline u32 xp_pl_link_config_ltssm_status_f(u32 v) +{ + return (v & 0x1U) << 4U; +} +static inline u32 xp_pl_link_config_ltssm_status_idle_v(void) +{ + return 0x00000000U; +} +static inline u32 xp_pl_link_config_ltssm_directive_f(u32 v) +{ + return (v & 0xfU) << 0U; +} +static inline u32 xp_pl_link_config_ltssm_directive_m(void) +{ + return 0xfU << 0U; +} +static inline u32 xp_pl_link_config_ltssm_directive_normal_operations_v(void) +{ + return 0x00000000U; +} +static inline u32 xp_pl_link_config_ltssm_directive_change_speed_v(void) +{ + return 0x00000001U; +} +static inline u32 xp_pl_link_config_max_link_rate_f(u32 v) +{ + return (v & 0x3U) << 18U; +} +static inline u32 xp_pl_link_config_max_link_rate_m(void) +{ + return 0x3U << 18U; +} +static inline u32 xp_pl_link_config_max_link_rate_2500_mtps_v(void) +{ + return 0x00000002U; +} +static inline u32 xp_pl_link_config_max_link_rate_5000_mtps_v(void) +{ + return 0x00000001U; +} +static inline u32 xp_pl_link_config_max_link_rate_8000_mtps_v(void) +{ + return 0x00000000U; +} +static inline u32 xp_pl_link_config_target_tx_width_f(u32 v) +{ + return (v & 0x7U) << 20U; +} +static inline u32 xp_pl_link_config_target_tx_width_m(void) +{ + return 0x7U << 20U; +} +static inline u32 xp_pl_link_config_target_tx_width_x1_v(void) +{ + return 0x00000007U; +} +static inline u32 xp_pl_link_config_target_tx_width_x2_v(void) +{ + return 0x00000006U; +} +static inline u32 xp_pl_link_config_target_tx_width_x4_v(void) +{ + return 0x00000005U; +} +static inline u32 xp_pl_link_config_target_tx_width_x8_v(void) +{ + return 0x00000004U; +} +static inline u32 xp_pl_link_config_target_tx_width_x16_v(void) +{ + return 0x00000000U; +} +#endif diff 
--git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_xve_gv100.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_xve_gv100.h new file mode 100644 index 000000000..fc7aa72e8 --- /dev/null +++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv100/hw_xve_gv100.h @@ -0,0 +1,207 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/* + * Function naming determines intended use: + * + * _r(void) : Returns the offset for register . + * + * _o(void) : Returns the offset for element . + * + * _w(void) : Returns the word offset for word (4 byte) element . + * + * __s(void) : Returns size of field of register in bits. + * + * __f(u32 v) : Returns a value based on 'v' which has been shifted + * and masked to place it at field of register . This value + * can be |'d with others to produce a full register value for + * register . + * + * __m(void) : Returns a mask for field of register . 
This + * value can be ~'d and then &'d to clear the value of field for + * register . + * + * ___f(void) : Returns the constant value after being shifted + * to place it at field of register . This value can be |'d + * with others to produce a full register value for . + * + * __v(u32 r) : Returns the value of field from a full register + * value 'r' after being shifted to place its LSB at bit 0. + * This value is suitable for direct comparison with other unshifted + * values appropriate for use in field of register . + * + * ___v(void) : Returns the constant value for defined for + * field of register . This value is suitable for direct + * comparison with unshifted values appropriate for use in field + * of register . + */ +#ifndef _hw_xve_gv100_h_ +#define _hw_xve_gv100_h_ + +static inline u32 xve_rom_ctrl_r(void) +{ + return 0x00000050U; +} +static inline u32 xve_rom_ctrl_rom_shadow_f(u32 v) +{ + return (v & 0x1U) << 0U; +} +static inline u32 xve_rom_ctrl_rom_shadow_disabled_f(void) +{ + return 0x0U; +} +static inline u32 xve_rom_ctrl_rom_shadow_enabled_f(void) +{ + return 0x1U; +} +static inline u32 xve_link_control_status_r(void) +{ + return 0x00000088U; +} +static inline u32 xve_link_control_status_link_speed_m(void) +{ + return 0xfU << 16U; +} +static inline u32 xve_link_control_status_link_speed_v(u32 r) +{ + return (r >> 16U) & 0xfU; +} +static inline u32 xve_link_control_status_link_speed_link_speed_2p5_v(void) +{ + return 0x00000001U; +} +static inline u32 xve_link_control_status_link_speed_link_speed_5p0_v(void) +{ + return 0x00000002U; +} +static inline u32 xve_link_control_status_link_speed_link_speed_8p0_v(void) +{ + return 0x00000003U; +} +static inline u32 xve_link_control_status_link_width_m(void) +{ + return 0x3fU << 20U; +} +static inline u32 xve_link_control_status_link_width_v(u32 r) +{ + return (r >> 20U) & 0x3fU; +} +static inline u32 xve_link_control_status_link_width_x1_v(void) +{ + return 0x00000001U; +} +static inline u32 
xve_link_control_status_link_width_x2_v(void) +{ + return 0x00000002U; +} +static inline u32 xve_link_control_status_link_width_x4_v(void) +{ + return 0x00000004U; +} +static inline u32 xve_link_control_status_link_width_x8_v(void) +{ + return 0x00000008U; +} +static inline u32 xve_link_control_status_link_width_x16_v(void) +{ + return 0x00000010U; +} +static inline u32 xve_priv_xv_r(void) +{ + return 0x00000150U; +} +static inline u32 xve_priv_xv_cya_l0s_enable_f(u32 v) +{ + return (v & 0x1U) << 7U; +} +static inline u32 xve_priv_xv_cya_l0s_enable_m(void) +{ + return 0x1U << 7U; +} +static inline u32 xve_priv_xv_cya_l0s_enable_v(u32 r) +{ + return (r >> 7U) & 0x1U; +} +static inline u32 xve_priv_xv_cya_l1_enable_f(u32 v) +{ + return (v & 0x1U) << 8U; +} +static inline u32 xve_priv_xv_cya_l1_enable_m(void) +{ + return 0x1U << 8U; +} +static inline u32 xve_priv_xv_cya_l1_enable_v(u32 r) +{ + return (r >> 8U) & 0x1U; +} +static inline u32 xve_cya_2_r(void) +{ + return 0x00000704U; +} +static inline u32 xve_reset_r(void) +{ + return 0x00000718U; +} +static inline u32 xve_reset_reset_m(void) +{ + return 0x1U << 0U; +} +static inline u32 xve_reset_gpu_on_sw_reset_m(void) +{ + return 0x1U << 1U; +} +static inline u32 xve_reset_counter_en_m(void) +{ + return 0x1U << 2U; +} +static inline u32 xve_reset_counter_val_f(u32 v) +{ + return (v & 0x7ffU) << 4U; +} +static inline u32 xve_reset_counter_val_m(void) +{ + return 0x7ffU << 4U; +} +static inline u32 xve_reset_counter_val_v(u32 r) +{ + return (r >> 4U) & 0x7ffU; +} +static inline u32 xve_reset_clock_on_sw_reset_m(void) +{ + return 0x1U << 15U; +} +static inline u32 xve_reset_clock_counter_en_m(void) +{ + return 0x1U << 16U; +} +static inline u32 xve_reset_clock_counter_val_f(u32 v) +{ + return (v & 0x7ffU) << 17U; +} +static inline u32 xve_reset_clock_counter_val_m(void) +{ + return 0x7ffU << 17U; +} +static inline u32 xve_reset_clock_counter_val_v(u32 r) +{ + return (r >> 17U) & 0x7ffU; +} +#endif diff --git 
a/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_bus_gv11b.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_bus_gv11b.h new file mode 100644 index 000000000..d1d9b34a2 --- /dev/null +++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_bus_gv11b.h @@ -0,0 +1,223 @@ +/* + * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/* + * Function naming determines intended use: + * + * _r(void) : Returns the offset for register . + * + * _o(void) : Returns the offset for element . + * + * _w(void) : Returns the word offset for word (4 byte) element . + * + * __s(void) : Returns size of field of register in bits. + * + * __f(u32 v) : Returns a value based on 'v' which has been shifted + * and masked to place it at field of register . This value + * can be |'d with others to produce a full register value for + * register . + * + * __m(void) : Returns a mask for field of register . 
This + * value can be ~'d and then &'d to clear the value of field for + * register . + * + * ___f(void) : Returns the constant value after being shifted + * to place it at field of register . This value can be |'d + * with others to produce a full register value for . + * + * __v(u32 r) : Returns the value of field from a full register + * value 'r' after being shifted to place its LSB at bit 0. + * This value is suitable for direct comparison with other unshifted + * values appropriate for use in field of register . + * + * ___v(void) : Returns the constant value for defined for + * field of register . This value is suitable for direct + * comparison with unshifted values appropriate for use in field + * of register . + */ +#ifndef _hw_bus_gv11b_h_ +#define _hw_bus_gv11b_h_ + +static inline u32 bus_bar0_window_r(void) +{ + return 0x00001700U; +} +static inline u32 bus_bar0_window_base_f(u32 v) +{ + return (v & 0xffffffU) << 0U; +} +static inline u32 bus_bar0_window_target_vid_mem_f(void) +{ + return 0x0U; +} +static inline u32 bus_bar0_window_target_sys_mem_coherent_f(void) +{ + return 0x2000000U; +} +static inline u32 bus_bar0_window_target_sys_mem_noncoherent_f(void) +{ + return 0x3000000U; +} +static inline u32 bus_bar0_window_target_bar0_window_base_shift_v(void) +{ + return 0x00000010U; +} +static inline u32 bus_bar1_block_r(void) +{ + return 0x00001704U; +} +static inline u32 bus_bar1_block_ptr_f(u32 v) +{ + return (v & 0xfffffffU) << 0U; +} +static inline u32 bus_bar1_block_target_vid_mem_f(void) +{ + return 0x0U; +} +static inline u32 bus_bar1_block_target_sys_mem_coh_f(void) +{ + return 0x20000000U; +} +static inline u32 bus_bar1_block_target_sys_mem_ncoh_f(void) +{ + return 0x30000000U; +} +static inline u32 bus_bar1_block_mode_virtual_f(void) +{ + return 0x80000000U; +} +static inline u32 bus_bar2_block_r(void) +{ + return 0x00001714U; +} +static inline u32 bus_bar2_block_ptr_f(u32 v) +{ + return (v & 0xfffffffU) << 0U; +} +static inline u32 
bus_bar2_block_target_vid_mem_f(void) +{ + return 0x0U; +} +static inline u32 bus_bar2_block_target_sys_mem_coh_f(void) +{ + return 0x20000000U; +} +static inline u32 bus_bar2_block_target_sys_mem_ncoh_f(void) +{ + return 0x30000000U; +} +static inline u32 bus_bar2_block_mode_virtual_f(void) +{ + return 0x80000000U; +} +static inline u32 bus_bar1_block_ptr_shift_v(void) +{ + return 0x0000000cU; +} +static inline u32 bus_bar2_block_ptr_shift_v(void) +{ + return 0x0000000cU; +} +static inline u32 bus_bind_status_r(void) +{ + return 0x00001710U; +} +static inline u32 bus_bind_status_bar1_pending_v(u32 r) +{ + return (r >> 0U) & 0x1U; +} +static inline u32 bus_bind_status_bar1_pending_empty_f(void) +{ + return 0x0U; +} +static inline u32 bus_bind_status_bar1_pending_busy_f(void) +{ + return 0x1U; +} +static inline u32 bus_bind_status_bar1_outstanding_v(u32 r) +{ + return (r >> 1U) & 0x1U; +} +static inline u32 bus_bind_status_bar1_outstanding_false_f(void) +{ + return 0x0U; +} +static inline u32 bus_bind_status_bar1_outstanding_true_f(void) +{ + return 0x2U; +} +static inline u32 bus_bind_status_bar2_pending_v(u32 r) +{ + return (r >> 2U) & 0x1U; +} +static inline u32 bus_bind_status_bar2_pending_empty_f(void) +{ + return 0x0U; +} +static inline u32 bus_bind_status_bar2_pending_busy_f(void) +{ + return 0x4U; +} +static inline u32 bus_bind_status_bar2_outstanding_v(u32 r) +{ + return (r >> 3U) & 0x1U; +} +static inline u32 bus_bind_status_bar2_outstanding_false_f(void) +{ + return 0x0U; +} +static inline u32 bus_bind_status_bar2_outstanding_true_f(void) +{ + return 0x8U; +} +static inline u32 bus_intr_0_r(void) +{ + return 0x00001100U; +} +static inline u32 bus_intr_0_pri_squash_m(void) +{ + return 0x1U << 1U; +} +static inline u32 bus_intr_0_pri_fecserr_m(void) +{ + return 0x1U << 2U; +} +static inline u32 bus_intr_0_pri_timeout_m(void) +{ + return 0x1U << 3U; +} +static inline u32 bus_intr_en_0_r(void) +{ + return 0x00001140U; +} +static inline u32 
bus_intr_en_0_pri_squash_m(void) +{ + return 0x1U << 1U; +} +static inline u32 bus_intr_en_0_pri_fecserr_m(void) +{ + return 0x1U << 2U; +} +static inline u32 bus_intr_en_0_pri_timeout_m(void) +{ + return 0x1U << 3U; +} +#endif diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_ccsr_gv11b.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_ccsr_gv11b.h new file mode 100644 index 000000000..e21a47388 --- /dev/null +++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_ccsr_gv11b.h @@ -0,0 +1,187 @@ +/* + * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/* + * Function naming determines intended use: + * + * _r(void) : Returns the offset for register . + * + * _o(void) : Returns the offset for element . + * + * _w(void) : Returns the word offset for word (4 byte) element . + * + * __s(void) : Returns size of field of register in bits. 
+ *
+ *     <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
+ *         and masked to place it at field <y> of register <x>.  This value
+ *         can be |'d with others to produce a full register value for
+ *         register <x>.
+ *
+ *     <x>_<y>_m(void) : Returns a mask for field <y> of register <x>.  This
+ *         value can be ~'d and then &'d to clear the value of field <y> for
+ *         register <x>.
+ *
+ *     <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
+ *         to place it at field <y> of register <x>.  This value can be |'d
+ *         with others to produce a full register value for <x>.
+ *
+ *     <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
+ *         value 'r' after being shifted to place its LSB at bit 0.
+ *         This value is suitable for direct comparison with other unshifted
+ *         values appropriate for use in field <y> of register <x>.
+ *
+ *     <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
+ *         field <y> of register <x>.  This value is suitable for direct
+ *         comparison with unshifted values appropriate for use in field <y>
+ *         of register <x>.
+ */ +#ifndef _hw_ccsr_gv11b_h_ +#define _hw_ccsr_gv11b_h_ + +static inline u32 ccsr_channel_inst_r(u32 i) +{ + return 0x00800000U + i*8U; +} +static inline u32 ccsr_channel_inst__size_1_v(void) +{ + return 0x00000200U; +} +static inline u32 ccsr_channel_inst_ptr_f(u32 v) +{ + return (v & 0xfffffffU) << 0U; +} +static inline u32 ccsr_channel_inst_target_vid_mem_f(void) +{ + return 0x0U; +} +static inline u32 ccsr_channel_inst_target_sys_mem_coh_f(void) +{ + return 0x20000000U; +} +static inline u32 ccsr_channel_inst_target_sys_mem_ncoh_f(void) +{ + return 0x30000000U; +} +static inline u32 ccsr_channel_inst_bind_false_f(void) +{ + return 0x0U; +} +static inline u32 ccsr_channel_inst_bind_true_f(void) +{ + return 0x80000000U; +} +static inline u32 ccsr_channel_r(u32 i) +{ + return 0x00800004U + i*8U; +} +static inline u32 ccsr_channel__size_1_v(void) +{ + return 0x00000200U; +} +static inline u32 ccsr_channel_enable_v(u32 r) +{ + return (r >> 0U) & 0x1U; +} +static inline u32 ccsr_channel_enable_set_f(u32 v) +{ + return (v & 0x1U) << 10U; +} +static inline u32 ccsr_channel_enable_set_true_f(void) +{ + return 0x400U; +} +static inline u32 ccsr_channel_enable_clr_true_f(void) +{ + return 0x800U; +} +static inline u32 ccsr_channel_status_v(u32 r) +{ + return (r >> 24U) & 0xfU; +} +static inline u32 ccsr_channel_status_pending_ctx_reload_v(void) +{ + return 0x00000002U; +} +static inline u32 ccsr_channel_status_pending_acq_ctx_reload_v(void) +{ + return 0x00000004U; +} +static inline u32 ccsr_channel_status_on_pbdma_ctx_reload_v(void) +{ + return 0x0000000aU; +} +static inline u32 ccsr_channel_status_on_pbdma_and_eng_ctx_reload_v(void) +{ + return 0x0000000bU; +} +static inline u32 ccsr_channel_status_on_eng_ctx_reload_v(void) +{ + return 0x0000000cU; +} +static inline u32 ccsr_channel_status_on_eng_pending_ctx_reload_v(void) +{ + return 0x0000000dU; +} +static inline u32 ccsr_channel_status_on_eng_pending_acq_ctx_reload_v(void) +{ + return 0x0000000eU; +} +static 
inline u32 ccsr_channel_next_v(u32 r) +{ + return (r >> 1U) & 0x1U; +} +static inline u32 ccsr_channel_next_true_v(void) +{ + return 0x00000001U; +} +static inline u32 ccsr_channel_force_ctx_reload_true_f(void) +{ + return 0x100U; +} +static inline u32 ccsr_channel_pbdma_faulted_f(u32 v) +{ + return (v & 0x1U) << 22U; +} +static inline u32 ccsr_channel_pbdma_faulted_reset_f(void) +{ + return 0x400000U; +} +static inline u32 ccsr_channel_eng_faulted_f(u32 v) +{ + return (v & 0x1U) << 23U; +} +static inline u32 ccsr_channel_eng_faulted_v(u32 r) +{ + return (r >> 23U) & 0x1U; +} +static inline u32 ccsr_channel_eng_faulted_reset_f(void) +{ + return 0x800000U; +} +static inline u32 ccsr_channel_eng_faulted_true_v(void) +{ + return 0x00000001U; +} +static inline u32 ccsr_channel_busy_v(u32 r) +{ + return (r >> 28U) & 0x1U; +} +#endif diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_ce_gv11b.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_ce_gv11b.h new file mode 100644 index 000000000..efc14d002 --- /dev/null +++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_ce_gv11b.h @@ -0,0 +1,107 @@ +/* + * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+/*
+ * Function naming determines intended use:
+ *
+ *     <x>_r(void) : Returns the offset for register <x>.
+ *
+ *     <x>_o(void) : Returns the offset for element <x>.
+ *
+ *     <x>_w(void) : Returns the word offset for word (4 byte) element <x>.
+ *
+ *     <x>_<y>_s(void) : Returns size of field <y> of register <x> in bits.
+ *
+ *     <x>_<y>_f(u32 v) : Returns a value based on 'v' which has been shifted
+ *         and masked to place it at field <y> of register <x>.  This value
+ *         can be |'d with others to produce a full register value for
+ *         register <x>.
+ *
+ *     <x>_<y>_m(void) : Returns a mask for field <y> of register <x>.  This
+ *         value can be ~'d and then &'d to clear the value of field <y> for
+ *         register <x>.
+ *
+ *     <x>_<y>_<z>_f(void) : Returns the constant value <z> after being shifted
+ *         to place it at field <y> of register <x>.  This value can be |'d
+ *         with others to produce a full register value for <x>.
+ *
+ *     <x>_<y>_v(u32 r) : Returns the value of field <y> from a full register
+ *         value 'r' after being shifted to place its LSB at bit 0.
+ *         This value is suitable for direct comparison with other unshifted
+ *         values appropriate for use in field <y> of register <x>.
+ *
+ *     <x>_<y>_<z>_v(void) : Returns the constant value for <z> defined for
+ *         field <y> of register <x>.  This value is suitable for direct
+ *         comparison with unshifted values appropriate for use in field <y>
+ *         of register <x>.
+ */ +#ifndef _hw_ce_gv11b_h_ +#define _hw_ce_gv11b_h_ + +static inline u32 ce_intr_status_r(u32 i) +{ + return 0x00104410U + i*128U; +} +static inline u32 ce_intr_status_blockpipe_pending_f(void) +{ + return 0x1U; +} +static inline u32 ce_intr_status_blockpipe_reset_f(void) +{ + return 0x1U; +} +static inline u32 ce_intr_status_nonblockpipe_pending_f(void) +{ + return 0x2U; +} +static inline u32 ce_intr_status_nonblockpipe_reset_f(void) +{ + return 0x2U; +} +static inline u32 ce_intr_status_launcherr_pending_f(void) +{ + return 0x4U; +} +static inline u32 ce_intr_status_launcherr_reset_f(void) +{ + return 0x4U; +} +static inline u32 ce_intr_status_invalid_config_pending_f(void) +{ + return 0x8U; +} +static inline u32 ce_intr_status_invalid_config_reset_f(void) +{ + return 0x8U; +} +static inline u32 ce_intr_status_mthd_buffer_fault_pending_f(void) +{ + return 0x10U; +} +static inline u32 ce_intr_status_mthd_buffer_fault_reset_f(void) +{ + return 0x10U; +} +static inline u32 ce_pce_map_r(void) +{ + return 0x00104028U; +} +#endif diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_ctxsw_prog_gv11b.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_ctxsw_prog_gv11b.h new file mode 100644 index 000000000..623a8c158 --- /dev/null +++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_ctxsw_prog_gv11b.h @@ -0,0 +1,455 @@ +/* + * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/* + * Function naming determines intended use: + * + * _r(void) : Returns the offset for register . + * + * _o(void) : Returns the offset for element . + * + * _w(void) : Returns the word offset for word (4 byte) element . + * + * __s(void) : Returns size of field of register in bits. + * + * __f(u32 v) : Returns a value based on 'v' which has been shifted + * and masked to place it at field of register . This value + * can be |'d with others to produce a full register value for + * register . + * + * __m(void) : Returns a mask for field of register . This + * value can be ~'d and then &'d to clear the value of field for + * register . + * + * ___f(void) : Returns the constant value after being shifted + * to place it at field of register . This value can be |'d + * with others to produce a full register value for . 
+ * + * __v(u32 r) : Returns the value of field from a full register + * value 'r' after being shifted to place its LSB at bit 0. + * This value is suitable for direct comparison with other unshifted + * values appropriate for use in field of register . + * + * ___v(void) : Returns the constant value for defined for + * field of register . This value is suitable for direct + * comparison with unshifted values appropriate for use in field + * of register . + */ +#ifndef _hw_ctxsw_prog_gv11b_h_ +#define _hw_ctxsw_prog_gv11b_h_ + +static inline u32 ctxsw_prog_fecs_header_v(void) +{ + return 0x00000100U; +} +static inline u32 ctxsw_prog_main_image_num_gpcs_o(void) +{ + return 0x00000008U; +} +static inline u32 ctxsw_prog_main_image_ctl_o(void) +{ + return 0x0000000cU; +} +static inline u32 ctxsw_prog_main_image_ctl_type_f(u32 v) +{ + return (v & 0x3fU) << 0U; +} +static inline u32 ctxsw_prog_main_image_ctl_type_undefined_v(void) +{ + return 0x00000000U; +} +static inline u32 ctxsw_prog_main_image_ctl_type_opengl_v(void) +{ + return 0x00000008U; +} +static inline u32 ctxsw_prog_main_image_ctl_type_dx9_v(void) +{ + return 0x00000010U; +} +static inline u32 ctxsw_prog_main_image_ctl_type_dx10_v(void) +{ + return 0x00000011U; +} +static inline u32 ctxsw_prog_main_image_ctl_type_dx11_v(void) +{ + return 0x00000012U; +} +static inline u32 ctxsw_prog_main_image_ctl_type_compute_v(void) +{ + return 0x00000020U; +} +static inline u32 ctxsw_prog_main_image_ctl_type_per_veid_header_v(void) +{ + return 0x00000021U; +} +static inline u32 ctxsw_prog_main_image_patch_count_o(void) +{ + return 0x00000010U; +} +static inline u32 ctxsw_prog_main_image_context_id_o(void) +{ + return 0x000000f0U; +} +static inline u32 ctxsw_prog_main_image_patch_adr_lo_o(void) +{ + return 0x00000014U; +} +static inline u32 ctxsw_prog_main_image_patch_adr_hi_o(void) +{ + return 0x00000018U; +} +static inline u32 ctxsw_prog_main_image_zcull_o(void) +{ + return 0x0000001cU; +} +static inline u32 
ctxsw_prog_main_image_zcull_mode_no_ctxsw_v(void) +{ + return 0x00000001U; +} +static inline u32 ctxsw_prog_main_image_zcull_mode_separate_buffer_v(void) +{ + return 0x00000002U; +} +static inline u32 ctxsw_prog_main_image_zcull_ptr_o(void) +{ + return 0x00000020U; +} +static inline u32 ctxsw_prog_main_image_pm_o(void) +{ + return 0x00000028U; +} +static inline u32 ctxsw_prog_main_image_pm_mode_m(void) +{ + return 0x7U << 0U; +} +static inline u32 ctxsw_prog_main_image_pm_mode_no_ctxsw_f(void) +{ + return 0x0U; +} +static inline u32 ctxsw_prog_main_image_pm_smpc_mode_m(void) +{ + return 0x7U << 3U; +} +static inline u32 ctxsw_prog_main_image_pm_smpc_mode_ctxsw_f(void) +{ + return 0x8U; +} +static inline u32 ctxsw_prog_main_image_pm_smpc_mode_no_ctxsw_f(void) +{ + return 0x0U; +} +static inline u32 ctxsw_prog_main_image_pm_ptr_o(void) +{ + return 0x0000002cU; +} +static inline u32 ctxsw_prog_main_image_num_save_ops_o(void) +{ + return 0x000000f4U; +} +static inline u32 ctxsw_prog_main_image_num_wfi_save_ops_o(void) +{ + return 0x000000d0U; +} +static inline u32 ctxsw_prog_main_image_num_cta_save_ops_o(void) +{ + return 0x000000d4U; +} +static inline u32 ctxsw_prog_main_image_num_gfxp_save_ops_o(void) +{ + return 0x000000d8U; +} +static inline u32 ctxsw_prog_main_image_num_cilp_save_ops_o(void) +{ + return 0x000000dcU; +} +static inline u32 ctxsw_prog_main_image_num_restore_ops_o(void) +{ + return 0x000000f8U; +} +static inline u32 ctxsw_prog_main_image_zcull_ptr_hi_o(void) +{ + return 0x00000060U; +} +static inline u32 ctxsw_prog_main_image_zcull_ptr_hi_v_f(u32 v) +{ + return (v & 0x1ffffU) << 0U; +} +static inline u32 ctxsw_prog_main_image_pm_ptr_hi_o(void) +{ + return 0x00000094U; +} +static inline u32 ctxsw_prog_main_image_full_preemption_ptr_hi_o(void) +{ + return 0x00000064U; +} +static inline u32 ctxsw_prog_main_image_full_preemption_ptr_hi_v_f(u32 v) +{ + return (v & 0x1ffffU) << 0U; +} +static inline u32 ctxsw_prog_main_image_full_preemption_ptr_o(void) +{ + 
return 0x00000068U; +} +static inline u32 ctxsw_prog_main_image_full_preemption_ptr_v_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 ctxsw_prog_main_image_full_preemption_ptr_veid0_hi_o(void) +{ + return 0x00000070U; +} +static inline u32 ctxsw_prog_main_image_full_preemption_ptr_veid0_hi_v_f(u32 v) +{ + return (v & 0x1ffffU) << 0U; +} +static inline u32 ctxsw_prog_main_image_full_preemption_ptr_veid0_o(void) +{ + return 0x00000074U; +} +static inline u32 ctxsw_prog_main_image_full_preemption_ptr_veid0_v_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 ctxsw_prog_main_image_context_buffer_ptr_hi_o(void) +{ + return 0x00000078U; +} +static inline u32 ctxsw_prog_main_image_context_buffer_ptr_hi_v_f(u32 v) +{ + return (v & 0x1ffffU) << 0U; +} +static inline u32 ctxsw_prog_main_image_context_buffer_ptr_o(void) +{ + return 0x0000007cU; +} +static inline u32 ctxsw_prog_main_image_context_buffer_ptr_v_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 ctxsw_prog_main_image_magic_value_o(void) +{ + return 0x000000fcU; +} +static inline u32 ctxsw_prog_main_image_magic_value_v_value_v(void) +{ + return 0x600dc0deU; +} +static inline u32 ctxsw_prog_local_priv_register_ctl_o(void) +{ + return 0x0000000cU; +} +static inline u32 ctxsw_prog_local_priv_register_ctl_offset_v(u32 r) +{ + return (r >> 0U) & 0xffffU; +} +static inline u32 ctxsw_prog_main_image_global_cb_ptr_o(void) +{ + return 0x000000b8U; +} +static inline u32 ctxsw_prog_main_image_global_cb_ptr_v_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 ctxsw_prog_main_image_global_cb_ptr_hi_o(void) +{ + return 0x000000bcU; +} +static inline u32 ctxsw_prog_main_image_global_cb_ptr_hi_v_f(u32 v) +{ + return (v & 0x1ffffU) << 0U; +} +static inline u32 ctxsw_prog_main_image_global_pagepool_ptr_o(void) +{ + return 0x000000c0U; +} +static inline u32 ctxsw_prog_main_image_global_pagepool_ptr_v_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline 
u32 ctxsw_prog_main_image_global_pagepool_ptr_hi_o(void) +{ + return 0x000000c4U; +} +static inline u32 ctxsw_prog_main_image_global_pagepool_ptr_hi_v_f(u32 v) +{ + return (v & 0x1ffffU) << 0U; +} +static inline u32 ctxsw_prog_main_image_control_block_ptr_o(void) +{ + return 0x000000c8U; +} +static inline u32 ctxsw_prog_main_image_control_block_ptr_v_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 ctxsw_prog_main_image_control_block_ptr_hi_o(void) +{ + return 0x000000ccU; +} +static inline u32 ctxsw_prog_main_image_control_block_ptr_hi_v_f(u32 v) +{ + return (v & 0x1ffffU) << 0U; +} +static inline u32 ctxsw_prog_main_image_context_ramchain_buffer_addr_lo_o(void) +{ + return 0x000000e0U; +} +static inline u32 ctxsw_prog_main_image_context_ramchain_buffer_addr_lo_v_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 ctxsw_prog_main_image_context_ramchain_buffer_addr_hi_o(void) +{ + return 0x000000e4U; +} +static inline u32 ctxsw_prog_main_image_context_ramchain_buffer_addr_hi_v_f(u32 v) +{ + return (v & 0x1ffffU) << 0U; +} +static inline u32 ctxsw_prog_local_image_ppc_info_o(void) +{ + return 0x000000f4U; +} +static inline u32 ctxsw_prog_local_image_ppc_info_num_ppcs_v(u32 r) +{ + return (r >> 0U) & 0xffffU; +} +static inline u32 ctxsw_prog_local_image_ppc_info_ppc_mask_v(u32 r) +{ + return (r >> 16U) & 0xffffU; +} +static inline u32 ctxsw_prog_local_image_num_tpcs_o(void) +{ + return 0x000000f8U; +} +static inline u32 ctxsw_prog_local_magic_value_o(void) +{ + return 0x000000fcU; +} +static inline u32 ctxsw_prog_local_magic_value_v_value_v(void) +{ + return 0xad0becabU; +} +static inline u32 ctxsw_prog_main_extended_buffer_ctl_o(void) +{ + return 0x000000ecU; +} +static inline u32 ctxsw_prog_main_extended_buffer_ctl_offset_v(u32 r) +{ + return (r >> 0U) & 0xffffU; +} +static inline u32 ctxsw_prog_main_extended_buffer_ctl_size_v(u32 r) +{ + return (r >> 16U) & 0xffU; +} +static inline u32 
ctxsw_prog_extended_buffer_segments_size_in_bytes_v(void) +{ + return 0x00000100U; +} +static inline u32 ctxsw_prog_extended_marker_size_in_bytes_v(void) +{ + return 0x00000004U; +} +static inline u32 ctxsw_prog_extended_sm_dsm_perf_counter_register_stride_v(void) +{ + return 0x00000000U; +} +static inline u32 ctxsw_prog_extended_sm_dsm_perf_counter_control_register_stride_v(void) +{ + return 0x00000002U; +} +static inline u32 ctxsw_prog_main_image_priv_access_map_config_o(void) +{ + return 0x000000a0U; +} +static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_s(void) +{ + return 2U; +} +static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_f(u32 v) +{ + return (v & 0x3U) << 0U; +} +static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_m(void) +{ + return 0x3U << 0U; +} +static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_v(u32 r) +{ + return (r >> 0U) & 0x3U; +} +static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_allow_all_f(void) +{ + return 0x0U; +} +static inline u32 ctxsw_prog_main_image_priv_access_map_config_mode_use_map_f(void) +{ + return 0x2U; +} +static inline u32 ctxsw_prog_main_image_priv_access_map_addr_lo_o(void) +{ + return 0x000000a4U; +} +static inline u32 ctxsw_prog_main_image_priv_access_map_addr_hi_o(void) +{ + return 0x000000a8U; +} +static inline u32 ctxsw_prog_main_image_misc_options_o(void) +{ + return 0x0000003cU; +} +static inline u32 ctxsw_prog_main_image_misc_options_verif_features_m(void) +{ + return 0x1U << 3U; +} +static inline u32 ctxsw_prog_main_image_misc_options_verif_features_disabled_f(void) +{ + return 0x0U; +} +static inline u32 ctxsw_prog_main_image_graphics_preemption_options_o(void) +{ + return 0x00000080U; +} +static inline u32 ctxsw_prog_main_image_graphics_preemption_options_control_f(u32 v) +{ + return (v & 0x3U) << 0U; +} +static inline u32 ctxsw_prog_main_image_graphics_preemption_options_control_gfxp_f(void) +{ + return 0x1U; +} +static 
inline u32 ctxsw_prog_main_image_compute_preemption_options_o(void) +{ + return 0x00000084U; +} +static inline u32 ctxsw_prog_main_image_compute_preemption_options_control_f(u32 v) +{ + return (v & 0x3U) << 0U; +} +static inline u32 ctxsw_prog_main_image_compute_preemption_options_control_cta_f(void) +{ + return 0x1U; +} +static inline u32 ctxsw_prog_main_image_compute_preemption_options_control_cilp_f(void) +{ + return 0x2U; +} +#endif diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_falcon_gv11b.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_falcon_gv11b.h new file mode 100644 index 000000000..4bb8f2de1 --- /dev/null +++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_falcon_gv11b.h @@ -0,0 +1,599 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/* + * Function naming determines intended use: + * + * _r(void) : Returns the offset for register . 
+ * + * _o(void) : Returns the offset for element . + * + * _w(void) : Returns the word offset for word (4 byte) element . + * + * __s(void) : Returns size of field of register in bits. + * + * __f(u32 v) : Returns a value based on 'v' which has been shifted + * and masked to place it at field of register . This value + * can be |'d with others to produce a full register value for + * register . + * + * __m(void) : Returns a mask for field of register . This + * value can be ~'d and then &'d to clear the value of field for + * register . + * + * ___f(void) : Returns the constant value after being shifted + * to place it at field of register . This value can be |'d + * with others to produce a full register value for . + * + * __v(u32 r) : Returns the value of field from a full register + * value 'r' after being shifted to place its LSB at bit 0. + * This value is suitable for direct comparison with other unshifted + * values appropriate for use in field of register . + * + * ___v(void) : Returns the constant value for defined for + * field of register . This value is suitable for direct + * comparison with unshifted values appropriate for use in field + * of register . 
+ */ +#ifndef _hw_falcon_gv11b_h_ +#define _hw_falcon_gv11b_h_ + +static inline u32 falcon_falcon_irqsset_r(void) +{ + return 0x00000000U; +} +static inline u32 falcon_falcon_irqsset_swgen0_set_f(void) +{ + return 0x40U; +} +static inline u32 falcon_falcon_irqsclr_r(void) +{ + return 0x00000004U; +} +static inline u32 falcon_falcon_irqstat_r(void) +{ + return 0x00000008U; +} +static inline u32 falcon_falcon_irqstat_halt_true_f(void) +{ + return 0x10U; +} +static inline u32 falcon_falcon_irqstat_exterr_true_f(void) +{ + return 0x20U; +} +static inline u32 falcon_falcon_irqstat_swgen0_true_f(void) +{ + return 0x40U; +} +static inline u32 falcon_falcon_irqmode_r(void) +{ + return 0x0000000cU; +} +static inline u32 falcon_falcon_irqmset_r(void) +{ + return 0x00000010U; +} +static inline u32 falcon_falcon_irqmset_gptmr_f(u32 v) +{ + return (v & 0x1U) << 0U; +} +static inline u32 falcon_falcon_irqmset_wdtmr_f(u32 v) +{ + return (v & 0x1U) << 1U; +} +static inline u32 falcon_falcon_irqmset_mthd_f(u32 v) +{ + return (v & 0x1U) << 2U; +} +static inline u32 falcon_falcon_irqmset_ctxsw_f(u32 v) +{ + return (v & 0x1U) << 3U; +} +static inline u32 falcon_falcon_irqmset_halt_f(u32 v) +{ + return (v & 0x1U) << 4U; +} +static inline u32 falcon_falcon_irqmset_exterr_f(u32 v) +{ + return (v & 0x1U) << 5U; +} +static inline u32 falcon_falcon_irqmset_swgen0_f(u32 v) +{ + return (v & 0x1U) << 6U; +} +static inline u32 falcon_falcon_irqmset_swgen1_f(u32 v) +{ + return (v & 0x1U) << 7U; +} +static inline u32 falcon_falcon_irqmclr_r(void) +{ + return 0x00000014U; +} +static inline u32 falcon_falcon_irqmclr_gptmr_f(u32 v) +{ + return (v & 0x1U) << 0U; +} +static inline u32 falcon_falcon_irqmclr_wdtmr_f(u32 v) +{ + return (v & 0x1U) << 1U; +} +static inline u32 falcon_falcon_irqmclr_mthd_f(u32 v) +{ + return (v & 0x1U) << 2U; +} +static inline u32 falcon_falcon_irqmclr_ctxsw_f(u32 v) +{ + return (v & 0x1U) << 3U; +} +static inline u32 falcon_falcon_irqmclr_halt_f(u32 v) +{ + return (v & 
0x1U) << 4U; +} +static inline u32 falcon_falcon_irqmclr_exterr_f(u32 v) +{ + return (v & 0x1U) << 5U; +} +static inline u32 falcon_falcon_irqmclr_swgen0_f(u32 v) +{ + return (v & 0x1U) << 6U; +} +static inline u32 falcon_falcon_irqmclr_swgen1_f(u32 v) +{ + return (v & 0x1U) << 7U; +} +static inline u32 falcon_falcon_irqmclr_ext_f(u32 v) +{ + return (v & 0xffU) << 8U; +} +static inline u32 falcon_falcon_irqmask_r(void) +{ + return 0x00000018U; +} +static inline u32 falcon_falcon_irqdest_r(void) +{ + return 0x0000001cU; +} +static inline u32 falcon_falcon_irqdest_host_gptmr_f(u32 v) +{ + return (v & 0x1U) << 0U; +} +static inline u32 falcon_falcon_irqdest_host_wdtmr_f(u32 v) +{ + return (v & 0x1U) << 1U; +} +static inline u32 falcon_falcon_irqdest_host_mthd_f(u32 v) +{ + return (v & 0x1U) << 2U; +} +static inline u32 falcon_falcon_irqdest_host_ctxsw_f(u32 v) +{ + return (v & 0x1U) << 3U; +} +static inline u32 falcon_falcon_irqdest_host_halt_f(u32 v) +{ + return (v & 0x1U) << 4U; +} +static inline u32 falcon_falcon_irqdest_host_exterr_f(u32 v) +{ + return (v & 0x1U) << 5U; +} +static inline u32 falcon_falcon_irqdest_host_swgen0_f(u32 v) +{ + return (v & 0x1U) << 6U; +} +static inline u32 falcon_falcon_irqdest_host_swgen1_f(u32 v) +{ + return (v & 0x1U) << 7U; +} +static inline u32 falcon_falcon_irqdest_host_ext_f(u32 v) +{ + return (v & 0xffU) << 8U; +} +static inline u32 falcon_falcon_irqdest_target_gptmr_f(u32 v) +{ + return (v & 0x1U) << 16U; +} +static inline u32 falcon_falcon_irqdest_target_wdtmr_f(u32 v) +{ + return (v & 0x1U) << 17U; +} +static inline u32 falcon_falcon_irqdest_target_mthd_f(u32 v) +{ + return (v & 0x1U) << 18U; +} +static inline u32 falcon_falcon_irqdest_target_ctxsw_f(u32 v) +{ + return (v & 0x1U) << 19U; +} +static inline u32 falcon_falcon_irqdest_target_halt_f(u32 v) +{ + return (v & 0x1U) << 20U; +} +static inline u32 falcon_falcon_irqdest_target_exterr_f(u32 v) +{ + return (v & 0x1U) << 21U; +} +static inline u32 
falcon_falcon_irqdest_target_swgen0_f(u32 v) +{ + return (v & 0x1U) << 22U; +} +static inline u32 falcon_falcon_irqdest_target_swgen1_f(u32 v) +{ + return (v & 0x1U) << 23U; +} +static inline u32 falcon_falcon_irqdest_target_ext_f(u32 v) +{ + return (v & 0xffU) << 24U; +} +static inline u32 falcon_falcon_curctx_r(void) +{ + return 0x00000050U; +} +static inline u32 falcon_falcon_nxtctx_r(void) +{ + return 0x00000054U; +} +static inline u32 falcon_falcon_mailbox0_r(void) +{ + return 0x00000040U; +} +static inline u32 falcon_falcon_mailbox1_r(void) +{ + return 0x00000044U; +} +static inline u32 falcon_falcon_itfen_r(void) +{ + return 0x00000048U; +} +static inline u32 falcon_falcon_itfen_ctxen_enable_f(void) +{ + return 0x1U; +} +static inline u32 falcon_falcon_idlestate_r(void) +{ + return 0x0000004cU; +} +static inline u32 falcon_falcon_idlestate_falcon_busy_v(u32 r) +{ + return (r >> 0U) & 0x1U; +} +static inline u32 falcon_falcon_idlestate_ext_busy_v(u32 r) +{ + return (r >> 1U) & 0x7fffU; +} +static inline u32 falcon_falcon_os_r(void) +{ + return 0x00000080U; +} +static inline u32 falcon_falcon_engctl_r(void) +{ + return 0x000000a4U; +} +static inline u32 falcon_falcon_cpuctl_r(void) +{ + return 0x00000100U; +} +static inline u32 falcon_falcon_cpuctl_startcpu_f(u32 v) +{ + return (v & 0x1U) << 1U; +} +static inline u32 falcon_falcon_cpuctl_sreset_f(u32 v) +{ + return (v & 0x1U) << 2U; +} +static inline u32 falcon_falcon_cpuctl_hreset_f(u32 v) +{ + return (v & 0x1U) << 3U; +} +static inline u32 falcon_falcon_cpuctl_halt_intr_f(u32 v) +{ + return (v & 0x1U) << 4U; +} +static inline u32 falcon_falcon_cpuctl_halt_intr_m(void) +{ + return 0x1U << 4U; +} +static inline u32 falcon_falcon_cpuctl_halt_intr_v(u32 r) +{ + return (r >> 4U) & 0x1U; +} +static inline u32 falcon_falcon_cpuctl_stopped_m(void) +{ + return 0x1U << 5U; +} +static inline u32 falcon_falcon_cpuctl_cpuctl_alias_en_f(u32 v) +{ + return (v & 0x1U) << 6U; +} +static inline u32 
falcon_falcon_cpuctl_cpuctl_alias_en_m(void) +{ + return 0x1U << 6U; +} +static inline u32 falcon_falcon_cpuctl_cpuctl_alias_en_v(u32 r) +{ + return (r >> 6U) & 0x1U; +} +static inline u32 falcon_falcon_cpuctl_alias_r(void) +{ + return 0x00000130U; +} +static inline u32 falcon_falcon_cpuctl_alias_startcpu_f(u32 v) +{ + return (v & 0x1U) << 1U; +} +static inline u32 falcon_falcon_imemc_r(u32 i) +{ + return 0x00000180U + i*16U; +} +static inline u32 falcon_falcon_imemc_offs_f(u32 v) +{ + return (v & 0x3fU) << 2U; +} +static inline u32 falcon_falcon_imemc_blk_f(u32 v) +{ + return (v & 0xffU) << 8U; +} +static inline u32 falcon_falcon_imemc_aincw_f(u32 v) +{ + return (v & 0x1U) << 24U; +} +static inline u32 falcon_falcon_imemd_r(u32 i) +{ + return 0x00000184U + i*16U; +} +static inline u32 falcon_falcon_imemt_r(u32 i) +{ + return 0x00000188U + i*16U; +} +static inline u32 falcon_falcon_sctl_r(void) +{ + return 0x00000240U; +} +static inline u32 falcon_falcon_mmu_phys_sec_r(void) +{ + return 0x00100ce4U; +} +static inline u32 falcon_falcon_bootvec_r(void) +{ + return 0x00000104U; +} +static inline u32 falcon_falcon_bootvec_vec_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 falcon_falcon_dmactl_r(void) +{ + return 0x0000010cU; +} +static inline u32 falcon_falcon_dmactl_dmem_scrubbing_m(void) +{ + return 0x1U << 1U; +} +static inline u32 falcon_falcon_dmactl_imem_scrubbing_m(void) +{ + return 0x1U << 2U; +} +static inline u32 falcon_falcon_dmactl_require_ctx_f(u32 v) +{ + return (v & 0x1U) << 0U; +} +static inline u32 falcon_falcon_hwcfg_r(void) +{ + return 0x00000108U; +} +static inline u32 falcon_falcon_hwcfg_imem_size_v(u32 r) +{ + return (r >> 0U) & 0x1ffU; +} +static inline u32 falcon_falcon_hwcfg_dmem_size_v(u32 r) +{ + return (r >> 9U) & 0x1ffU; +} +static inline u32 falcon_falcon_dmatrfbase_r(void) +{ + return 0x00000110U; +} +static inline u32 falcon_falcon_dmatrfbase1_r(void) +{ + return 0x00000128U; +} +static inline u32 
falcon_falcon_dmatrfmoffs_r(void) +{ + return 0x00000114U; +} +static inline u32 falcon_falcon_dmatrfcmd_r(void) +{ + return 0x00000118U; +} +static inline u32 falcon_falcon_dmatrfcmd_imem_f(u32 v) +{ + return (v & 0x1U) << 4U; +} +static inline u32 falcon_falcon_dmatrfcmd_write_f(u32 v) +{ + return (v & 0x1U) << 5U; +} +static inline u32 falcon_falcon_dmatrfcmd_size_f(u32 v) +{ + return (v & 0x7U) << 8U; +} +static inline u32 falcon_falcon_dmatrfcmd_ctxdma_f(u32 v) +{ + return (v & 0x7U) << 12U; +} +static inline u32 falcon_falcon_dmatrffboffs_r(void) +{ + return 0x0000011cU; +} +static inline u32 falcon_falcon_imctl_debug_r(void) +{ + return 0x0000015cU; +} +static inline u32 falcon_falcon_imctl_debug_addr_blk_f(u32 v) +{ + return (v & 0xffffffU) << 0U; +} +static inline u32 falcon_falcon_imctl_debug_cmd_f(u32 v) +{ + return (v & 0x7U) << 24U; +} +static inline u32 falcon_falcon_imstat_r(void) +{ + return 0x00000144U; +} +static inline u32 falcon_falcon_traceidx_r(void) +{ + return 0x00000148U; +} +static inline u32 falcon_falcon_traceidx_maxidx_v(u32 r) +{ + return (r >> 16U) & 0xffU; +} +static inline u32 falcon_falcon_traceidx_idx_f(u32 v) +{ + return (v & 0xffU) << 0U; +} +static inline u32 falcon_falcon_tracepc_r(void) +{ + return 0x0000014cU; +} +static inline u32 falcon_falcon_tracepc_pc_v(u32 r) +{ + return (r >> 0U) & 0xffffffU; +} +static inline u32 falcon_falcon_exterraddr_r(void) +{ + return 0x00000168U; +} +static inline u32 falcon_falcon_exterrstat_r(void) +{ + return 0x0000016cU; +} +static inline u32 falcon_falcon_exterrstat_valid_m(void) +{ + return 0x1U << 31U; +} +static inline u32 falcon_falcon_exterrstat_valid_v(u32 r) +{ + return (r >> 31U) & 0x1U; +} +static inline u32 falcon_falcon_exterrstat_valid_true_v(void) +{ + return 0x00000001U; +} +static inline u32 falcon_falcon_icd_cmd_r(void) +{ + return 0x00000200U; +} +static inline u32 falcon_falcon_icd_cmd_opc_s(void) +{ + return 4U; +} +static inline u32 falcon_falcon_icd_cmd_opc_f(u32 v) 
+{ + return (v & 0xfU) << 0U; +} +static inline u32 falcon_falcon_icd_cmd_opc_m(void) +{ + return 0xfU << 0U; +} +static inline u32 falcon_falcon_icd_cmd_opc_v(u32 r) +{ + return (r >> 0U) & 0xfU; +} +static inline u32 falcon_falcon_icd_cmd_opc_rreg_f(void) +{ + return 0x8U; +} +static inline u32 falcon_falcon_icd_cmd_opc_rstat_f(void) +{ + return 0xeU; +} +static inline u32 falcon_falcon_icd_cmd_idx_f(u32 v) +{ + return (v & 0x1fU) << 8U; +} +static inline u32 falcon_falcon_icd_rdata_r(void) +{ + return 0x0000020cU; +} +static inline u32 falcon_falcon_dmemc_r(u32 i) +{ + return 0x000001c0U + i*8U; +} +static inline u32 falcon_falcon_dmemc_offs_f(u32 v) +{ + return (v & 0x3fU) << 2U; +} +static inline u32 falcon_falcon_dmemc_offs_m(void) +{ + return 0x3fU << 2U; +} +static inline u32 falcon_falcon_dmemc_blk_f(u32 v) +{ + return (v & 0xffU) << 8U; +} +static inline u32 falcon_falcon_dmemc_blk_m(void) +{ + return 0xffU << 8U; +} +static inline u32 falcon_falcon_dmemc_aincw_f(u32 v) +{ + return (v & 0x1U) << 24U; +} +static inline u32 falcon_falcon_dmemc_aincr_f(u32 v) +{ + return (v & 0x1U) << 25U; +} +static inline u32 falcon_falcon_dmemd_r(u32 i) +{ + return 0x000001c4U + i*8U; +} +static inline u32 falcon_falcon_debug1_r(void) +{ + return 0x00000090U; +} +static inline u32 falcon_falcon_debug1_ctxsw_mode_s(void) +{ + return 1U; +} +static inline u32 falcon_falcon_debug1_ctxsw_mode_f(u32 v) +{ + return (v & 0x1U) << 16U; +} +static inline u32 falcon_falcon_debug1_ctxsw_mode_m(void) +{ + return 0x1U << 16U; +} +static inline u32 falcon_falcon_debug1_ctxsw_mode_v(u32 r) +{ + return (r >> 16U) & 0x1U; +} +static inline u32 falcon_falcon_debug1_ctxsw_mode_init_f(void) +{ + return 0x0U; +} +static inline u32 falcon_falcon_debuginfo_r(void) +{ + return 0x00000094U; +} +#endif diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_fb_gv11b.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_fb_gv11b.h new file mode 100644 index 000000000..ea3c79391 --- /dev/null +++ 
b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_fb_gv11b.h @@ -0,0 +1,1827 @@ +/* + * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/* + * Function naming determines intended use: + * + * _r(void) : Returns the offset for register . + * + * _o(void) : Returns the offset for element . + * + * _w(void) : Returns the word offset for word (4 byte) element . + * + * __s(void) : Returns size of field of register in bits. + * + * __f(u32 v) : Returns a value based on 'v' which has been shifted + * and masked to place it at field of register . This value + * can be |'d with others to produce a full register value for + * register . + * + * __m(void) : Returns a mask for field of register . This + * value can be ~'d and then &'d to clear the value of field for + * register . 
+ * + * ___f(void) : Returns the constant value after being shifted + * to place it at field of register . This value can be |'d + * with others to produce a full register value for . + * + * __v(u32 r) : Returns the value of field from a full register + * value 'r' after being shifted to place its LSB at bit 0. + * This value is suitable for direct comparison with other unshifted + * values appropriate for use in field of register . + * + * ___v(void) : Returns the constant value for defined for + * field of register . This value is suitable for direct + * comparison with unshifted values appropriate for use in field + * of register . + */ +#ifndef _hw_fb_gv11b_h_ +#define _hw_fb_gv11b_h_ + +static inline u32 fb_fbhub_num_active_ltcs_r(void) +{ + return 0x00100800U; +} +static inline u32 fb_mmu_ctrl_r(void) +{ + return 0x00100c80U; +} +static inline u32 fb_mmu_ctrl_vm_pg_size_f(u32 v) +{ + return (v & 0x1U) << 0U; +} +static inline u32 fb_mmu_ctrl_vm_pg_size_128kb_f(void) +{ + return 0x0U; +} +static inline u32 fb_mmu_ctrl_vm_pg_size_64kb_f(void) +{ + return 0x1U; +} +static inline u32 fb_mmu_ctrl_pri_fifo_empty_v(u32 r) +{ + return (r >> 15U) & 0x1U; +} +static inline u32 fb_mmu_ctrl_pri_fifo_empty_false_f(void) +{ + return 0x0U; +} +static inline u32 fb_mmu_ctrl_pri_fifo_space_v(u32 r) +{ + return (r >> 16U) & 0xffU; +} +static inline u32 fb_mmu_ctrl_use_pdb_big_page_size_v(u32 r) +{ + return (r >> 11U) & 0x1U; +} +static inline u32 fb_mmu_ctrl_use_pdb_big_page_size_true_f(void) +{ + return 0x800U; +} +static inline u32 fb_mmu_ctrl_use_pdb_big_page_size_false_f(void) +{ + return 0x0U; +} +static inline u32 fb_priv_mmu_phy_secure_r(void) +{ + return 0x00100ce4U; +} +static inline u32 fb_mmu_invalidate_pdb_r(void) +{ + return 0x00100cb8U; +} +static inline u32 fb_mmu_invalidate_pdb_aperture_vid_mem_f(void) +{ + return 0x0U; +} +static inline u32 fb_mmu_invalidate_pdb_aperture_sys_mem_f(void) +{ + return 0x2U; +} +static inline u32 fb_mmu_invalidate_pdb_addr_f(u32 
v) +{ + return (v & 0xfffffffU) << 4U; +} +static inline u32 fb_mmu_invalidate_r(void) +{ + return 0x00100cbcU; +} +static inline u32 fb_mmu_invalidate_all_va_true_f(void) +{ + return 0x1U; +} +static inline u32 fb_mmu_invalidate_all_pdb_true_f(void) +{ + return 0x2U; +} +static inline u32 fb_mmu_invalidate_hubtlb_only_s(void) +{ + return 1U; +} +static inline u32 fb_mmu_invalidate_hubtlb_only_f(u32 v) +{ + return (v & 0x1U) << 2U; +} +static inline u32 fb_mmu_invalidate_hubtlb_only_m(void) +{ + return 0x1U << 2U; +} +static inline u32 fb_mmu_invalidate_hubtlb_only_v(u32 r) +{ + return (r >> 2U) & 0x1U; +} +static inline u32 fb_mmu_invalidate_hubtlb_only_true_f(void) +{ + return 0x4U; +} +static inline u32 fb_mmu_invalidate_replay_s(void) +{ + return 3U; +} +static inline u32 fb_mmu_invalidate_replay_f(u32 v) +{ + return (v & 0x7U) << 3U; +} +static inline u32 fb_mmu_invalidate_replay_m(void) +{ + return 0x7U << 3U; +} +static inline u32 fb_mmu_invalidate_replay_v(u32 r) +{ + return (r >> 3U) & 0x7U; +} +static inline u32 fb_mmu_invalidate_replay_none_f(void) +{ + return 0x0U; +} +static inline u32 fb_mmu_invalidate_replay_start_f(void) +{ + return 0x8U; +} +static inline u32 fb_mmu_invalidate_replay_start_ack_all_f(void) +{ + return 0x10U; +} +static inline u32 fb_mmu_invalidate_replay_cancel_global_f(void) +{ + return 0x20U; +} +static inline u32 fb_mmu_invalidate_sys_membar_s(void) +{ + return 1U; +} +static inline u32 fb_mmu_invalidate_sys_membar_f(u32 v) +{ + return (v & 0x1U) << 6U; +} +static inline u32 fb_mmu_invalidate_sys_membar_m(void) +{ + return 0x1U << 6U; +} +static inline u32 fb_mmu_invalidate_sys_membar_v(u32 r) +{ + return (r >> 6U) & 0x1U; +} +static inline u32 fb_mmu_invalidate_sys_membar_true_f(void) +{ + return 0x40U; +} +static inline u32 fb_mmu_invalidate_ack_s(void) +{ + return 2U; +} +static inline u32 fb_mmu_invalidate_ack_f(u32 v) +{ + return (v & 0x3U) << 7U; +} +static inline u32 fb_mmu_invalidate_ack_m(void) +{ + return 0x3U << 7U; +} 
+static inline u32 fb_mmu_invalidate_ack_v(u32 r) +{ + return (r >> 7U) & 0x3U; +} +static inline u32 fb_mmu_invalidate_ack_ack_none_required_f(void) +{ + return 0x0U; +} +static inline u32 fb_mmu_invalidate_ack_ack_intranode_f(void) +{ + return 0x100U; +} +static inline u32 fb_mmu_invalidate_ack_ack_globally_f(void) +{ + return 0x80U; +} +static inline u32 fb_mmu_invalidate_cancel_client_id_s(void) +{ + return 6U; +} +static inline u32 fb_mmu_invalidate_cancel_client_id_f(u32 v) +{ + return (v & 0x3fU) << 9U; +} +static inline u32 fb_mmu_invalidate_cancel_client_id_m(void) +{ + return 0x3fU << 9U; +} +static inline u32 fb_mmu_invalidate_cancel_client_id_v(u32 r) +{ + return (r >> 9U) & 0x3fU; +} +static inline u32 fb_mmu_invalidate_cancel_gpc_id_s(void) +{ + return 5U; +} +static inline u32 fb_mmu_invalidate_cancel_gpc_id_f(u32 v) +{ + return (v & 0x1fU) << 15U; +} +static inline u32 fb_mmu_invalidate_cancel_gpc_id_m(void) +{ + return 0x1fU << 15U; +} +static inline u32 fb_mmu_invalidate_cancel_gpc_id_v(u32 r) +{ + return (r >> 15U) & 0x1fU; +} +static inline u32 fb_mmu_invalidate_cancel_client_type_s(void) +{ + return 1U; +} +static inline u32 fb_mmu_invalidate_cancel_client_type_f(u32 v) +{ + return (v & 0x1U) << 20U; +} +static inline u32 fb_mmu_invalidate_cancel_client_type_m(void) +{ + return 0x1U << 20U; +} +static inline u32 fb_mmu_invalidate_cancel_client_type_v(u32 r) +{ + return (r >> 20U) & 0x1U; +} +static inline u32 fb_mmu_invalidate_cancel_client_type_gpc_f(void) +{ + return 0x0U; +} +static inline u32 fb_mmu_invalidate_cancel_client_type_hub_f(void) +{ + return 0x100000U; +} +static inline u32 fb_mmu_invalidate_cancel_cache_level_s(void) +{ + return 3U; +} +static inline u32 fb_mmu_invalidate_cancel_cache_level_f(u32 v) +{ + return (v & 0x7U) << 24U; +} +static inline u32 fb_mmu_invalidate_cancel_cache_level_m(void) +{ + return 0x7U << 24U; +} +static inline u32 fb_mmu_invalidate_cancel_cache_level_v(u32 r) +{ + return (r >> 24U) & 0x7U; +} +static 
inline u32 fb_mmu_invalidate_cancel_cache_level_all_f(void) +{ + return 0x0U; +} +static inline u32 fb_mmu_invalidate_cancel_cache_level_pte_only_f(void) +{ + return 0x1000000U; +} +static inline u32 fb_mmu_invalidate_cancel_cache_level_up_to_pde0_f(void) +{ + return 0x2000000U; +} +static inline u32 fb_mmu_invalidate_cancel_cache_level_up_to_pde1_f(void) +{ + return 0x3000000U; +} +static inline u32 fb_mmu_invalidate_cancel_cache_level_up_to_pde2_f(void) +{ + return 0x4000000U; +} +static inline u32 fb_mmu_invalidate_cancel_cache_level_up_to_pde3_f(void) +{ + return 0x5000000U; +} +static inline u32 fb_mmu_invalidate_cancel_cache_level_up_to_pde4_f(void) +{ + return 0x6000000U; +} +static inline u32 fb_mmu_invalidate_cancel_cache_level_up_to_pde5_f(void) +{ + return 0x7000000U; +} +static inline u32 fb_mmu_invalidate_trigger_s(void) +{ + return 1U; +} +static inline u32 fb_mmu_invalidate_trigger_f(u32 v) +{ + return (v & 0x1U) << 31U; +} +static inline u32 fb_mmu_invalidate_trigger_m(void) +{ + return 0x1U << 31U; +} +static inline u32 fb_mmu_invalidate_trigger_v(u32 r) +{ + return (r >> 31U) & 0x1U; +} +static inline u32 fb_mmu_invalidate_trigger_true_f(void) +{ + return 0x80000000U; +} +static inline u32 fb_mmu_debug_wr_r(void) +{ + return 0x00100cc8U; +} +static inline u32 fb_mmu_debug_wr_aperture_s(void) +{ + return 2U; +} +static inline u32 fb_mmu_debug_wr_aperture_f(u32 v) +{ + return (v & 0x3U) << 0U; +} +static inline u32 fb_mmu_debug_wr_aperture_m(void) +{ + return 0x3U << 0U; +} +static inline u32 fb_mmu_debug_wr_aperture_v(u32 r) +{ + return (r >> 0U) & 0x3U; +} +static inline u32 fb_mmu_debug_wr_aperture_vid_mem_f(void) +{ + return 0x0U; +} +static inline u32 fb_mmu_debug_wr_aperture_sys_mem_coh_f(void) +{ + return 0x2U; +} +static inline u32 fb_mmu_debug_wr_aperture_sys_mem_ncoh_f(void) +{ + return 0x3U; +} +static inline u32 fb_mmu_debug_wr_vol_false_f(void) +{ + return 0x0U; +} +static inline u32 fb_mmu_debug_wr_vol_true_v(void) +{ + return 
0x00000001U; +} +static inline u32 fb_mmu_debug_wr_vol_true_f(void) +{ + return 0x4U; +} +static inline u32 fb_mmu_debug_wr_addr_f(u32 v) +{ + return (v & 0xfffffffU) << 4U; +} +static inline u32 fb_mmu_debug_wr_addr_alignment_v(void) +{ + return 0x0000000cU; +} +static inline u32 fb_mmu_debug_rd_r(void) +{ + return 0x00100cccU; +} +static inline u32 fb_mmu_debug_rd_aperture_vid_mem_f(void) +{ + return 0x0U; +} +static inline u32 fb_mmu_debug_rd_aperture_sys_mem_coh_f(void) +{ + return 0x2U; +} +static inline u32 fb_mmu_debug_rd_aperture_sys_mem_ncoh_f(void) +{ + return 0x3U; +} +static inline u32 fb_mmu_debug_rd_vol_false_f(void) +{ + return 0x0U; +} +static inline u32 fb_mmu_debug_rd_addr_f(u32 v) +{ + return (v & 0xfffffffU) << 4U; +} +static inline u32 fb_mmu_debug_rd_addr_alignment_v(void) +{ + return 0x0000000cU; +} +static inline u32 fb_mmu_debug_ctrl_r(void) +{ + return 0x00100cc4U; +} +static inline u32 fb_mmu_debug_ctrl_debug_v(u32 r) +{ + return (r >> 16U) & 0x1U; +} +static inline u32 fb_mmu_debug_ctrl_debug_m(void) +{ + return 0x1U << 16U; +} +static inline u32 fb_mmu_debug_ctrl_debug_enabled_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_debug_ctrl_debug_disabled_v(void) +{ + return 0x00000000U; +} +static inline u32 fb_mmu_vpr_info_r(void) +{ + return 0x00100cd0U; +} +static inline u32 fb_mmu_vpr_info_fetch_v(u32 r) +{ + return (r >> 2U) & 0x1U; +} +static inline u32 fb_mmu_vpr_info_fetch_false_v(void) +{ + return 0x00000000U; +} +static inline u32 fb_mmu_vpr_info_fetch_true_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_l2tlb_ecc_status_r(void) +{ + return 0x00100e70U; +} +static inline u32 fb_mmu_l2tlb_ecc_status_corrected_err_l2tlb_sa_data_m(void) +{ + return 0x1U << 0U; +} +static inline u32 fb_mmu_l2tlb_ecc_status_uncorrected_err_l2tlb_sa_data_m(void) +{ + return 0x1U << 1U; +} +static inline u32 fb_mmu_l2tlb_ecc_status_corrected_err_total_counter_overflow_m(void) +{ + return 0x1U << 16U; +} +static inline u32 
fb_mmu_l2tlb_ecc_status_uncorrected_err_total_counter_overflow_m(void) +{ + return 0x1U << 18U; +} +static inline u32 fb_mmu_l2tlb_ecc_status_reset_f(u32 v) +{ + return (v & 0x1U) << 30U; +} +static inline u32 fb_mmu_l2tlb_ecc_status_reset_clear_f(void) +{ + return 0x40000000U; +} +static inline u32 fb_mmu_l2tlb_ecc_corrected_err_count_r(void) +{ + return 0x00100e74U; +} +static inline u32 fb_mmu_l2tlb_ecc_corrected_err_count_total_s(void) +{ + return 16U; +} +static inline u32 fb_mmu_l2tlb_ecc_corrected_err_count_total_f(u32 v) +{ + return (v & 0xffffU) << 0U; +} +static inline u32 fb_mmu_l2tlb_ecc_corrected_err_count_total_m(void) +{ + return 0xffffU << 0U; +} +static inline u32 fb_mmu_l2tlb_ecc_corrected_err_count_total_v(u32 r) +{ + return (r >> 0U) & 0xffffU; +} +static inline u32 fb_mmu_l2tlb_ecc_uncorrected_err_count_r(void) +{ + return 0x00100e78U; +} +static inline u32 fb_mmu_l2tlb_ecc_uncorrected_err_count_total_s(void) +{ + return 16U; +} +static inline u32 fb_mmu_l2tlb_ecc_uncorrected_err_count_total_f(u32 v) +{ + return (v & 0xffffU) << 0U; +} +static inline u32 fb_mmu_l2tlb_ecc_uncorrected_err_count_total_m(void) +{ + return 0xffffU << 0U; +} +static inline u32 fb_mmu_l2tlb_ecc_uncorrected_err_count_total_v(u32 r) +{ + return (r >> 0U) & 0xffffU; +} +static inline u32 fb_mmu_l2tlb_ecc_address_r(void) +{ + return 0x00100e7cU; +} +static inline u32 fb_mmu_l2tlb_ecc_address_index_s(void) +{ + return 32U; +} +static inline u32 fb_mmu_l2tlb_ecc_address_index_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 fb_mmu_l2tlb_ecc_address_index_m(void) +{ + return 0xffffffffU << 0U; +} +static inline u32 fb_mmu_l2tlb_ecc_address_index_v(u32 r) +{ + return (r >> 0U) & 0xffffffffU; +} +static inline u32 fb_mmu_hubtlb_ecc_status_r(void) +{ + return 0x00100e84U; +} +static inline u32 fb_mmu_hubtlb_ecc_status_corrected_err_sa_data_m(void) +{ + return 0x1U << 0U; +} +static inline u32 fb_mmu_hubtlb_ecc_status_uncorrected_err_sa_data_m(void) +{ + 
return 0x1U << 1U; +} +static inline u32 fb_mmu_hubtlb_ecc_status_corrected_err_total_counter_overflow_m(void) +{ + return 0x1U << 16U; +} +static inline u32 fb_mmu_hubtlb_ecc_status_uncorrected_err_total_counter_overflow_m(void) +{ + return 0x1U << 18U; +} +static inline u32 fb_mmu_hubtlb_ecc_status_reset_f(u32 v) +{ + return (v & 0x1U) << 30U; +} +static inline u32 fb_mmu_hubtlb_ecc_status_reset_clear_f(void) +{ + return 0x40000000U; +} +static inline u32 fb_mmu_hubtlb_ecc_corrected_err_count_r(void) +{ + return 0x00100e88U; +} +static inline u32 fb_mmu_hubtlb_ecc_corrected_err_count_total_s(void) +{ + return 16U; +} +static inline u32 fb_mmu_hubtlb_ecc_corrected_err_count_total_f(u32 v) +{ + return (v & 0xffffU) << 0U; +} +static inline u32 fb_mmu_hubtlb_ecc_corrected_err_count_total_m(void) +{ + return 0xffffU << 0U; +} +static inline u32 fb_mmu_hubtlb_ecc_corrected_err_count_total_v(u32 r) +{ + return (r >> 0U) & 0xffffU; +} +static inline u32 fb_mmu_hubtlb_ecc_uncorrected_err_count_r(void) +{ + return 0x00100e8cU; +} +static inline u32 fb_mmu_hubtlb_ecc_uncorrected_err_count_total_s(void) +{ + return 16U; +} +static inline u32 fb_mmu_hubtlb_ecc_uncorrected_err_count_total_f(u32 v) +{ + return (v & 0xffffU) << 0U; +} +static inline u32 fb_mmu_hubtlb_ecc_uncorrected_err_count_total_m(void) +{ + return 0xffffU << 0U; +} +static inline u32 fb_mmu_hubtlb_ecc_uncorrected_err_count_total_v(u32 r) +{ + return (r >> 0U) & 0xffffU; +} +static inline u32 fb_mmu_hubtlb_ecc_address_r(void) +{ + return 0x00100e90U; +} +static inline u32 fb_mmu_hubtlb_ecc_address_index_s(void) +{ + return 32U; +} +static inline u32 fb_mmu_hubtlb_ecc_address_index_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 fb_mmu_hubtlb_ecc_address_index_m(void) +{ + return 0xffffffffU << 0U; +} +static inline u32 fb_mmu_hubtlb_ecc_address_index_v(u32 r) +{ + return (r >> 0U) & 0xffffffffU; +} +static inline u32 fb_mmu_fillunit_ecc_status_r(void) +{ + return 0x00100e98U; +} +static 
inline u32 fb_mmu_fillunit_ecc_status_corrected_err_pte_data_m(void) +{ + return 0x1U << 0U; +} +static inline u32 fb_mmu_fillunit_ecc_status_uncorrected_err_pte_data_m(void) +{ + return 0x1U << 1U; +} +static inline u32 fb_mmu_fillunit_ecc_status_corrected_err_pde0_data_m(void) +{ + return 0x1U << 2U; +} +static inline u32 fb_mmu_fillunit_ecc_status_uncorrected_err_pde0_data_m(void) +{ + return 0x1U << 3U; +} +static inline u32 fb_mmu_fillunit_ecc_status_corrected_err_total_counter_overflow_m(void) +{ + return 0x1U << 16U; +} +static inline u32 fb_mmu_fillunit_ecc_status_uncorrected_err_total_counter_overflow_m(void) +{ + return 0x1U << 18U; +} +static inline u32 fb_mmu_fillunit_ecc_status_reset_f(u32 v) +{ + return (v & 0x1U) << 30U; +} +static inline u32 fb_mmu_fillunit_ecc_status_reset_clear_f(void) +{ + return 0x40000000U; +} +static inline u32 fb_mmu_fillunit_ecc_corrected_err_count_r(void) +{ + return 0x00100e9cU; +} +static inline u32 fb_mmu_fillunit_ecc_corrected_err_count_total_s(void) +{ + return 16U; +} +static inline u32 fb_mmu_fillunit_ecc_corrected_err_count_total_f(u32 v) +{ + return (v & 0xffffU) << 0U; +} +static inline u32 fb_mmu_fillunit_ecc_corrected_err_count_total_m(void) +{ + return 0xffffU << 0U; +} +static inline u32 fb_mmu_fillunit_ecc_corrected_err_count_total_v(u32 r) +{ + return (r >> 0U) & 0xffffU; +} +static inline u32 fb_mmu_fillunit_ecc_uncorrected_err_count_r(void) +{ + return 0x00100ea0U; +} +static inline u32 fb_mmu_fillunit_ecc_uncorrected_err_count_total_s(void) +{ + return 16U; +} +static inline u32 fb_mmu_fillunit_ecc_uncorrected_err_count_total_f(u32 v) +{ + return (v & 0xffffU) << 0U; +} +static inline u32 fb_mmu_fillunit_ecc_uncorrected_err_count_total_m(void) +{ + return 0xffffU << 0U; +} +static inline u32 fb_mmu_fillunit_ecc_uncorrected_err_count_total_v(u32 r) +{ + return (r >> 0U) & 0xffffU; +} +static inline u32 fb_mmu_fillunit_ecc_address_r(void) +{ + return 0x00100ea4U; +} +static inline u32 
fb_mmu_fillunit_ecc_address_index_s(void) +{ + return 32U; +} +static inline u32 fb_mmu_fillunit_ecc_address_index_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 fb_mmu_fillunit_ecc_address_index_m(void) +{ + return 0xffffffffU << 0U; +} +static inline u32 fb_mmu_fillunit_ecc_address_index_v(u32 r) +{ + return (r >> 0U) & 0xffffffffU; +} +static inline u32 fb_niso_flush_sysmem_addr_r(void) +{ + return 0x00100c10U; +} +static inline u32 fb_niso_intr_r(void) +{ + return 0x00100a20U; +} +static inline u32 fb_niso_intr_hub_access_counter_notify_m(void) +{ + return 0x1U << 0U; +} +static inline u32 fb_niso_intr_hub_access_counter_notify_pending_f(void) +{ + return 0x1U; +} +static inline u32 fb_niso_intr_hub_access_counter_error_m(void) +{ + return 0x1U << 1U; +} +static inline u32 fb_niso_intr_hub_access_counter_error_pending_f(void) +{ + return 0x2U; +} +static inline u32 fb_niso_intr_mmu_replayable_fault_notify_m(void) +{ + return 0x1U << 27U; +} +static inline u32 fb_niso_intr_mmu_replayable_fault_notify_pending_f(void) +{ + return 0x8000000U; +} +static inline u32 fb_niso_intr_mmu_replayable_fault_overflow_m(void) +{ + return 0x1U << 28U; +} +static inline u32 fb_niso_intr_mmu_replayable_fault_overflow_pending_f(void) +{ + return 0x10000000U; +} +static inline u32 fb_niso_intr_mmu_nonreplayable_fault_notify_m(void) +{ + return 0x1U << 29U; +} +static inline u32 fb_niso_intr_mmu_nonreplayable_fault_notify_pending_f(void) +{ + return 0x20000000U; +} +static inline u32 fb_niso_intr_mmu_nonreplayable_fault_overflow_m(void) +{ + return 0x1U << 30U; +} +static inline u32 fb_niso_intr_mmu_nonreplayable_fault_overflow_pending_f(void) +{ + return 0x40000000U; +} +static inline u32 fb_niso_intr_mmu_other_fault_notify_m(void) +{ + return 0x1U << 31U; +} +static inline u32 fb_niso_intr_mmu_other_fault_notify_pending_f(void) +{ + return 0x80000000U; +} +static inline u32 fb_niso_intr_mmu_ecc_uncorrected_error_notify_m(void) +{ + return 0x1U << 26U; +} 
+static inline u32 fb_niso_intr_mmu_ecc_uncorrected_error_notify_pending_f(void) +{ + return 0x4000000U; +} +static inline u32 fb_niso_intr_en_r(u32 i) +{ + return 0x00100a24U + i*4U; +} +static inline u32 fb_niso_intr_en__size_1_v(void) +{ + return 0x00000002U; +} +static inline u32 fb_niso_intr_en_hub_access_counter_notify_f(u32 v) +{ + return (v & 0x1U) << 0U; +} +static inline u32 fb_niso_intr_en_hub_access_counter_notify_enabled_f(void) +{ + return 0x1U; +} +static inline u32 fb_niso_intr_en_hub_access_counter_error_f(u32 v) +{ + return (v & 0x1U) << 1U; +} +static inline u32 fb_niso_intr_en_hub_access_counter_error_enabled_f(void) +{ + return 0x2U; +} +static inline u32 fb_niso_intr_en_mmu_replayable_fault_notify_f(u32 v) +{ + return (v & 0x1U) << 27U; +} +static inline u32 fb_niso_intr_en_mmu_replayable_fault_notify_enabled_f(void) +{ + return 0x8000000U; +} +static inline u32 fb_niso_intr_en_mmu_replayable_fault_overflow_f(u32 v) +{ + return (v & 0x1U) << 28U; +} +static inline u32 fb_niso_intr_en_mmu_replayable_fault_overflow_enabled_f(void) +{ + return 0x10000000U; +} +static inline u32 fb_niso_intr_en_mmu_nonreplayable_fault_notify_f(u32 v) +{ + return (v & 0x1U) << 29U; +} +static inline u32 fb_niso_intr_en_mmu_nonreplayable_fault_notify_enabled_f(void) +{ + return 0x20000000U; +} +static inline u32 fb_niso_intr_en_mmu_nonreplayable_fault_overflow_f(u32 v) +{ + return (v & 0x1U) << 30U; +} +static inline u32 fb_niso_intr_en_mmu_nonreplayable_fault_overflow_enabled_f(void) +{ + return 0x40000000U; +} +static inline u32 fb_niso_intr_en_mmu_other_fault_notify_f(u32 v) +{ + return (v & 0x1U) << 31U; +} +static inline u32 fb_niso_intr_en_mmu_other_fault_notify_enabled_f(void) +{ + return 0x80000000U; +} +static inline u32 fb_niso_intr_en_mmu_ecc_uncorrected_error_notify_f(u32 v) +{ + return (v & 0x1U) << 26U; +} +static inline u32 fb_niso_intr_en_mmu_ecc_uncorrected_error_notify_enabled_f(void) +{ + return 0x4000000U; +} +static inline u32 
fb_niso_intr_en_set_r(u32 i) +{ + return 0x00100a2cU + i*4U; +} +static inline u32 fb_niso_intr_en_set__size_1_v(void) +{ + return 0x00000002U; +} +static inline u32 fb_niso_intr_en_set_hub_access_counter_notify_m(void) +{ + return 0x1U << 0U; +} +static inline u32 fb_niso_intr_en_set_hub_access_counter_notify_set_f(void) +{ + return 0x1U; +} +static inline u32 fb_niso_intr_en_set_hub_access_counter_error_m(void) +{ + return 0x1U << 1U; +} +static inline u32 fb_niso_intr_en_set_hub_access_counter_error_set_f(void) +{ + return 0x2U; +} +static inline u32 fb_niso_intr_en_set_mmu_replayable_fault_notify_m(void) +{ + return 0x1U << 27U; +} +static inline u32 fb_niso_intr_en_set_mmu_replayable_fault_notify_set_f(void) +{ + return 0x8000000U; +} +static inline u32 fb_niso_intr_en_set_mmu_replayable_fault_overflow_m(void) +{ + return 0x1U << 28U; +} +static inline u32 fb_niso_intr_en_set_mmu_replayable_fault_overflow_set_f(void) +{ + return 0x10000000U; +} +static inline u32 fb_niso_intr_en_set_mmu_nonreplayable_fault_notify_m(void) +{ + return 0x1U << 29U; +} +static inline u32 fb_niso_intr_en_set_mmu_nonreplayable_fault_notify_set_f(void) +{ + return 0x20000000U; +} +static inline u32 fb_niso_intr_en_set_mmu_nonreplayable_fault_overflow_m(void) +{ + return 0x1U << 30U; +} +static inline u32 fb_niso_intr_en_set_mmu_nonreplayable_fault_overflow_set_f(void) +{ + return 0x40000000U; +} +static inline u32 fb_niso_intr_en_set_mmu_other_fault_notify_m(void) +{ + return 0x1U << 31U; +} +static inline u32 fb_niso_intr_en_set_mmu_other_fault_notify_set_f(void) +{ + return 0x80000000U; +} +static inline u32 fb_niso_intr_en_set_mmu_ecc_uncorrected_error_notify_m(void) +{ + return 0x1U << 26U; +} +static inline u32 fb_niso_intr_en_set_mmu_ecc_uncorrected_error_notify_set_f(void) +{ + return 0x4000000U; +} +static inline u32 fb_niso_intr_en_clr_r(u32 i) +{ + return 0x00100a34U + i*4U; +} +static inline u32 fb_niso_intr_en_clr__size_1_v(void) +{ + return 0x00000002U; +} +static inline 
u32 fb_niso_intr_en_clr_hub_access_counter_notify_m(void) +{ + return 0x1U << 0U; +} +static inline u32 fb_niso_intr_en_clr_hub_access_counter_notify_set_f(void) +{ + return 0x1U; +} +static inline u32 fb_niso_intr_en_clr_hub_access_counter_error_m(void) +{ + return 0x1U << 1U; +} +static inline u32 fb_niso_intr_en_clr_hub_access_counter_error_set_f(void) +{ + return 0x2U; +} +static inline u32 fb_niso_intr_en_clr_mmu_replayable_fault_notify_m(void) +{ + return 0x1U << 27U; +} +static inline u32 fb_niso_intr_en_clr_mmu_replayable_fault_notify_set_f(void) +{ + return 0x8000000U; +} +static inline u32 fb_niso_intr_en_clr_mmu_replayable_fault_overflow_m(void) +{ + return 0x1U << 28U; +} +static inline u32 fb_niso_intr_en_clr_mmu_replayable_fault_overflow_set_f(void) +{ + return 0x10000000U; +} +static inline u32 fb_niso_intr_en_clr_mmu_nonreplayable_fault_notify_m(void) +{ + return 0x1U << 29U; +} +static inline u32 fb_niso_intr_en_clr_mmu_nonreplayable_fault_notify_set_f(void) +{ + return 0x20000000U; +} +static inline u32 fb_niso_intr_en_clr_mmu_nonreplayable_fault_overflow_m(void) +{ + return 0x1U << 30U; +} +static inline u32 fb_niso_intr_en_clr_mmu_nonreplayable_fault_overflow_set_f(void) +{ + return 0x40000000U; +} +static inline u32 fb_niso_intr_en_clr_mmu_other_fault_notify_m(void) +{ + return 0x1U << 31U; +} +static inline u32 fb_niso_intr_en_clr_mmu_other_fault_notify_set_f(void) +{ + return 0x80000000U; +} +static inline u32 fb_niso_intr_en_clr_mmu_ecc_uncorrected_error_notify_m(void) +{ + return 0x1U << 26U; +} +static inline u32 fb_niso_intr_en_clr_mmu_ecc_uncorrected_error_notify_set_f(void) +{ + return 0x4000000U; +} +static inline u32 fb_niso_intr_en_clr_mmu_non_replay_fault_buffer_v(void) +{ + return 0x00000000U; +} +static inline u32 fb_niso_intr_en_clr_mmu_replay_fault_buffer_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_buffer_lo_r(u32 i) +{ + return 0x00100e24U + i*20U; +} +static inline u32 
fb_mmu_fault_buffer_lo__size_1_v(void) +{ + return 0x00000002U; +} +static inline u32 fb_mmu_fault_buffer_lo_addr_mode_f(u32 v) +{ + return (v & 0x1U) << 0U; +} +static inline u32 fb_mmu_fault_buffer_lo_addr_mode_v(u32 r) +{ + return (r >> 0U) & 0x1U; +} +static inline u32 fb_mmu_fault_buffer_lo_addr_mode_virtual_v(void) +{ + return 0x00000000U; +} +static inline u32 fb_mmu_fault_buffer_lo_addr_mode_virtual_f(void) +{ + return 0x0U; +} +static inline u32 fb_mmu_fault_buffer_lo_addr_mode_physical_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_buffer_lo_addr_mode_physical_f(void) +{ + return 0x1U; +} +static inline u32 fb_mmu_fault_buffer_lo_phys_aperture_f(u32 v) +{ + return (v & 0x3U) << 1U; +} +static inline u32 fb_mmu_fault_buffer_lo_phys_aperture_v(u32 r) +{ + return (r >> 1U) & 0x3U; +} +static inline u32 fb_mmu_fault_buffer_lo_phys_aperture_sys_coh_v(void) +{ + return 0x00000002U; +} +static inline u32 fb_mmu_fault_buffer_lo_phys_aperture_sys_coh_f(void) +{ + return 0x4U; +} +static inline u32 fb_mmu_fault_buffer_lo_phys_aperture_sys_nocoh_v(void) +{ + return 0x00000003U; +} +static inline u32 fb_mmu_fault_buffer_lo_phys_aperture_sys_nocoh_f(void) +{ + return 0x6U; +} +static inline u32 fb_mmu_fault_buffer_lo_phys_vol_f(u32 v) +{ + return (v & 0x1U) << 3U; +} +static inline u32 fb_mmu_fault_buffer_lo_phys_vol_v(u32 r) +{ + return (r >> 3U) & 0x1U; +} +static inline u32 fb_mmu_fault_buffer_lo_addr_f(u32 v) +{ + return (v & 0xfffffU) << 12U; +} +static inline u32 fb_mmu_fault_buffer_lo_addr_v(u32 r) +{ + return (r >> 12U) & 0xfffffU; +} +static inline u32 fb_mmu_fault_buffer_hi_r(u32 i) +{ + return 0x00100e28U + i*20U; +} +static inline u32 fb_mmu_fault_buffer_hi__size_1_v(void) +{ + return 0x00000002U; +} +static inline u32 fb_mmu_fault_buffer_hi_addr_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 fb_mmu_fault_buffer_hi_addr_v(u32 r) +{ + return (r >> 0U) & 0xffffffffU; +} +static inline u32 
fb_mmu_fault_buffer_get_r(u32 i) +{ + return 0x00100e2cU + i*20U; +} +static inline u32 fb_mmu_fault_buffer_get__size_1_v(void) +{ + return 0x00000002U; +} +static inline u32 fb_mmu_fault_buffer_get_ptr_f(u32 v) +{ + return (v & 0xfffffU) << 0U; +} +static inline u32 fb_mmu_fault_buffer_get_ptr_m(void) +{ + return 0xfffffU << 0U; +} +static inline u32 fb_mmu_fault_buffer_get_ptr_v(u32 r) +{ + return (r >> 0U) & 0xfffffU; +} +static inline u32 fb_mmu_fault_buffer_get_getptr_corrupted_f(u32 v) +{ + return (v & 0x1U) << 30U; +} +static inline u32 fb_mmu_fault_buffer_get_getptr_corrupted_m(void) +{ + return 0x1U << 30U; +} +static inline u32 fb_mmu_fault_buffer_get_getptr_corrupted_clear_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_buffer_get_getptr_corrupted_clear_f(void) +{ + return 0x40000000U; +} +static inline u32 fb_mmu_fault_buffer_get_overflow_f(u32 v) +{ + return (v & 0x1U) << 31U; +} +static inline u32 fb_mmu_fault_buffer_get_overflow_m(void) +{ + return 0x1U << 31U; +} +static inline u32 fb_mmu_fault_buffer_get_overflow_clear_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_buffer_get_overflow_clear_f(void) +{ + return 0x80000000U; +} +static inline u32 fb_mmu_fault_buffer_put_r(u32 i) +{ + return 0x00100e30U + i*20U; +} +static inline u32 fb_mmu_fault_buffer_put__size_1_v(void) +{ + return 0x00000002U; +} +static inline u32 fb_mmu_fault_buffer_put_ptr_f(u32 v) +{ + return (v & 0xfffffU) << 0U; +} +static inline u32 fb_mmu_fault_buffer_put_ptr_v(u32 r) +{ + return (r >> 0U) & 0xfffffU; +} +static inline u32 fb_mmu_fault_buffer_put_getptr_corrupted_f(u32 v) +{ + return (v & 0x1U) << 30U; +} +static inline u32 fb_mmu_fault_buffer_put_getptr_corrupted_v(u32 r) +{ + return (r >> 30U) & 0x1U; +} +static inline u32 fb_mmu_fault_buffer_put_getptr_corrupted_yes_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_buffer_put_getptr_corrupted_yes_f(void) +{ + return 0x40000000U; +} +static inline u32 
fb_mmu_fault_buffer_put_getptr_corrupted_no_v(void) +{ + return 0x00000000U; +} +static inline u32 fb_mmu_fault_buffer_put_getptr_corrupted_no_f(void) +{ + return 0x0U; +} +static inline u32 fb_mmu_fault_buffer_put_overflow_f(u32 v) +{ + return (v & 0x1U) << 31U; +} +static inline u32 fb_mmu_fault_buffer_put_overflow_v(u32 r) +{ + return (r >> 31U) & 0x1U; +} +static inline u32 fb_mmu_fault_buffer_put_overflow_yes_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_buffer_put_overflow_yes_f(void) +{ + return 0x80000000U; +} +static inline u32 fb_mmu_fault_buffer_size_r(u32 i) +{ + return 0x00100e34U + i*20U; +} +static inline u32 fb_mmu_fault_buffer_size__size_1_v(void) +{ + return 0x00000002U; +} +static inline u32 fb_mmu_fault_buffer_size_val_f(u32 v) +{ + return (v & 0xfffffU) << 0U; +} +static inline u32 fb_mmu_fault_buffer_size_val_v(u32 r) +{ + return (r >> 0U) & 0xfffffU; +} +static inline u32 fb_mmu_fault_buffer_size_overflow_intr_f(u32 v) +{ + return (v & 0x1U) << 29U; +} +static inline u32 fb_mmu_fault_buffer_size_overflow_intr_v(u32 r) +{ + return (r >> 29U) & 0x1U; +} +static inline u32 fb_mmu_fault_buffer_size_overflow_intr_enable_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_buffer_size_overflow_intr_enable_f(void) +{ + return 0x20000000U; +} +static inline u32 fb_mmu_fault_buffer_size_set_default_f(u32 v) +{ + return (v & 0x1U) << 30U; +} +static inline u32 fb_mmu_fault_buffer_size_set_default_v(u32 r) +{ + return (r >> 30U) & 0x1U; +} +static inline u32 fb_mmu_fault_buffer_size_set_default_yes_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_buffer_size_set_default_yes_f(void) +{ + return 0x40000000U; +} +static inline u32 fb_mmu_fault_buffer_size_enable_f(u32 v) +{ + return (v & 0x1U) << 31U; +} +static inline u32 fb_mmu_fault_buffer_size_enable_m(void) +{ + return 0x1U << 31U; +} +static inline u32 fb_mmu_fault_buffer_size_enable_v(u32 r) +{ + return (r >> 31U) & 0x1U; +} +static inline u32 
fb_mmu_fault_buffer_size_enable_true_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_buffer_size_enable_true_f(void) +{ + return 0x80000000U; +} +static inline u32 fb_mmu_fault_addr_lo_r(void) +{ + return 0x00100e4cU; +} +static inline u32 fb_mmu_fault_addr_lo_phys_aperture_f(u32 v) +{ + return (v & 0x3U) << 0U; +} +static inline u32 fb_mmu_fault_addr_lo_phys_aperture_v(u32 r) +{ + return (r >> 0U) & 0x3U; +} +static inline u32 fb_mmu_fault_addr_lo_phys_aperture_sys_coh_v(void) +{ + return 0x00000002U; +} +static inline u32 fb_mmu_fault_addr_lo_phys_aperture_sys_coh_f(void) +{ + return 0x2U; +} +static inline u32 fb_mmu_fault_addr_lo_phys_aperture_sys_nocoh_v(void) +{ + return 0x00000003U; +} +static inline u32 fb_mmu_fault_addr_lo_phys_aperture_sys_nocoh_f(void) +{ + return 0x3U; +} +static inline u32 fb_mmu_fault_addr_lo_addr_f(u32 v) +{ + return (v & 0xfffffU) << 12U; +} +static inline u32 fb_mmu_fault_addr_lo_addr_v(u32 r) +{ + return (r >> 12U) & 0xfffffU; +} +static inline u32 fb_mmu_fault_addr_hi_r(void) +{ + return 0x00100e50U; +} +static inline u32 fb_mmu_fault_addr_hi_addr_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 fb_mmu_fault_addr_hi_addr_v(u32 r) +{ + return (r >> 0U) & 0xffffffffU; +} +static inline u32 fb_mmu_fault_inst_lo_r(void) +{ + return 0x00100e54U; +} +static inline u32 fb_mmu_fault_inst_lo_engine_id_v(u32 r) +{ + return (r >> 0U) & 0x1ffU; +} +static inline u32 fb_mmu_fault_inst_lo_aperture_v(u32 r) +{ + return (r >> 10U) & 0x3U; +} +static inline u32 fb_mmu_fault_inst_lo_aperture_sys_coh_v(void) +{ + return 0x00000002U; +} +static inline u32 fb_mmu_fault_inst_lo_aperture_sys_nocoh_v(void) +{ + return 0x00000003U; +} +static inline u32 fb_mmu_fault_inst_lo_addr_f(u32 v) +{ + return (v & 0xfffffU) << 12U; +} +static inline u32 fb_mmu_fault_inst_lo_addr_v(u32 r) +{ + return (r >> 12U) & 0xfffffU; +} +static inline u32 fb_mmu_fault_inst_hi_r(void) +{ + return 0x00100e58U; +} +static inline u32 
fb_mmu_fault_inst_hi_addr_v(u32 r) +{ + return (r >> 0U) & 0xffffffffU; +} +static inline u32 fb_mmu_fault_info_r(void) +{ + return 0x00100e5cU; +} +static inline u32 fb_mmu_fault_info_fault_type_v(u32 r) +{ + return (r >> 0U) & 0x1fU; +} +static inline u32 fb_mmu_fault_info_replayable_fault_v(u32 r) +{ + return (r >> 7U) & 0x1U; +} +static inline u32 fb_mmu_fault_info_client_v(u32 r) +{ + return (r >> 8U) & 0x7fU; +} +static inline u32 fb_mmu_fault_info_access_type_v(u32 r) +{ + return (r >> 16U) & 0xfU; +} +static inline u32 fb_mmu_fault_info_client_type_v(u32 r) +{ + return (r >> 20U) & 0x1U; +} +static inline u32 fb_mmu_fault_info_gpc_id_v(u32 r) +{ + return (r >> 24U) & 0x1fU; +} +static inline u32 fb_mmu_fault_info_protected_mode_v(u32 r) +{ + return (r >> 29U) & 0x1U; +} +static inline u32 fb_mmu_fault_info_replayable_fault_en_v(u32 r) +{ + return (r >> 30U) & 0x1U; +} +static inline u32 fb_mmu_fault_info_valid_v(u32 r) +{ + return (r >> 31U) & 0x1U; +} +static inline u32 fb_mmu_fault_status_r(void) +{ + return 0x00100e60U; +} +static inline u32 fb_mmu_fault_status_dropped_bar1_phys_m(void) +{ + return 0x1U << 0U; +} +static inline u32 fb_mmu_fault_status_dropped_bar1_phys_set_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_status_dropped_bar1_phys_set_f(void) +{ + return 0x1U; +} +static inline u32 fb_mmu_fault_status_dropped_bar1_phys_clear_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_status_dropped_bar1_phys_clear_f(void) +{ + return 0x1U; +} +static inline u32 fb_mmu_fault_status_dropped_bar1_virt_m(void) +{ + return 0x1U << 1U; +} +static inline u32 fb_mmu_fault_status_dropped_bar1_virt_set_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_status_dropped_bar1_virt_set_f(void) +{ + return 0x2U; +} +static inline u32 fb_mmu_fault_status_dropped_bar1_virt_clear_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_status_dropped_bar1_virt_clear_f(void) +{ + return 0x2U; +} +static 
inline u32 fb_mmu_fault_status_dropped_bar2_phys_m(void) +{ + return 0x1U << 2U; +} +static inline u32 fb_mmu_fault_status_dropped_bar2_phys_set_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_status_dropped_bar2_phys_set_f(void) +{ + return 0x4U; +} +static inline u32 fb_mmu_fault_status_dropped_bar2_phys_clear_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_status_dropped_bar2_phys_clear_f(void) +{ + return 0x4U; +} +static inline u32 fb_mmu_fault_status_dropped_bar2_virt_m(void) +{ + return 0x1U << 3U; +} +static inline u32 fb_mmu_fault_status_dropped_bar2_virt_set_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_status_dropped_bar2_virt_set_f(void) +{ + return 0x8U; +} +static inline u32 fb_mmu_fault_status_dropped_bar2_virt_clear_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_status_dropped_bar2_virt_clear_f(void) +{ + return 0x8U; +} +static inline u32 fb_mmu_fault_status_dropped_ifb_phys_m(void) +{ + return 0x1U << 4U; +} +static inline u32 fb_mmu_fault_status_dropped_ifb_phys_set_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_status_dropped_ifb_phys_set_f(void) +{ + return 0x10U; +} +static inline u32 fb_mmu_fault_status_dropped_ifb_phys_clear_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_status_dropped_ifb_phys_clear_f(void) +{ + return 0x10U; +} +static inline u32 fb_mmu_fault_status_dropped_ifb_virt_m(void) +{ + return 0x1U << 5U; +} +static inline u32 fb_mmu_fault_status_dropped_ifb_virt_set_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_status_dropped_ifb_virt_set_f(void) +{ + return 0x20U; +} +static inline u32 fb_mmu_fault_status_dropped_ifb_virt_clear_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_status_dropped_ifb_virt_clear_f(void) +{ + return 0x20U; +} +static inline u32 fb_mmu_fault_status_dropped_other_phys_m(void) +{ + return 0x1U << 6U; +} +static inline u32 
fb_mmu_fault_status_dropped_other_phys_set_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_status_dropped_other_phys_set_f(void) +{ + return 0x40U; +} +static inline u32 fb_mmu_fault_status_dropped_other_phys_clear_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_status_dropped_other_phys_clear_f(void) +{ + return 0x40U; +} +static inline u32 fb_mmu_fault_status_dropped_other_virt_m(void) +{ + return 0x1U << 7U; +} +static inline u32 fb_mmu_fault_status_dropped_other_virt_set_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_status_dropped_other_virt_set_f(void) +{ + return 0x80U; +} +static inline u32 fb_mmu_fault_status_dropped_other_virt_clear_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_status_dropped_other_virt_clear_f(void) +{ + return 0x80U; +} +static inline u32 fb_mmu_fault_status_replayable_m(void) +{ + return 0x1U << 8U; +} +static inline u32 fb_mmu_fault_status_replayable_set_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_status_replayable_set_f(void) +{ + return 0x100U; +} +static inline u32 fb_mmu_fault_status_replayable_reset_f(void) +{ + return 0x0U; +} +static inline u32 fb_mmu_fault_status_non_replayable_m(void) +{ + return 0x1U << 9U; +} +static inline u32 fb_mmu_fault_status_non_replayable_set_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_status_non_replayable_set_f(void) +{ + return 0x200U; +} +static inline u32 fb_mmu_fault_status_non_replayable_reset_f(void) +{ + return 0x0U; +} +static inline u32 fb_mmu_fault_status_replayable_error_m(void) +{ + return 0x1U << 10U; +} +static inline u32 fb_mmu_fault_status_replayable_error_set_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_status_replayable_error_set_f(void) +{ + return 0x400U; +} +static inline u32 fb_mmu_fault_status_replayable_error_reset_f(void) +{ + return 0x0U; +} +static inline u32 fb_mmu_fault_status_non_replayable_error_m(void) +{ + return 
0x1U << 11U; +} +static inline u32 fb_mmu_fault_status_non_replayable_error_set_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_status_non_replayable_error_set_f(void) +{ + return 0x800U; +} +static inline u32 fb_mmu_fault_status_non_replayable_error_reset_f(void) +{ + return 0x0U; +} +static inline u32 fb_mmu_fault_status_replayable_overflow_m(void) +{ + return 0x1U << 12U; +} +static inline u32 fb_mmu_fault_status_replayable_overflow_set_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_status_replayable_overflow_set_f(void) +{ + return 0x1000U; +} +static inline u32 fb_mmu_fault_status_replayable_overflow_reset_f(void) +{ + return 0x0U; +} +static inline u32 fb_mmu_fault_status_non_replayable_overflow_m(void) +{ + return 0x1U << 13U; +} +static inline u32 fb_mmu_fault_status_non_replayable_overflow_set_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_status_non_replayable_overflow_set_f(void) +{ + return 0x2000U; +} +static inline u32 fb_mmu_fault_status_non_replayable_overflow_reset_f(void) +{ + return 0x0U; +} +static inline u32 fb_mmu_fault_status_replayable_getptr_corrupted_m(void) +{ + return 0x1U << 14U; +} +static inline u32 fb_mmu_fault_status_replayable_getptr_corrupted_set_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_status_replayable_getptr_corrupted_set_f(void) +{ + return 0x4000U; +} +static inline u32 fb_mmu_fault_status_non_replayable_getptr_corrupted_m(void) +{ + return 0x1U << 15U; +} +static inline u32 fb_mmu_fault_status_non_replayable_getptr_corrupted_set_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_status_non_replayable_getptr_corrupted_set_f(void) +{ + return 0x8000U; +} +static inline u32 fb_mmu_fault_status_busy_m(void) +{ + return 0x1U << 30U; +} +static inline u32 fb_mmu_fault_status_busy_true_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_status_busy_true_f(void) +{ + return 0x40000000U; +} +static inline u32 
fb_mmu_fault_status_valid_m(void) +{ + return 0x1U << 31U; +} +static inline u32 fb_mmu_fault_status_valid_set_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_status_valid_set_f(void) +{ + return 0x80000000U; +} +static inline u32 fb_mmu_fault_status_valid_clear_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_fault_status_valid_clear_f(void) +{ + return 0x80000000U; +} +static inline u32 fb_mmu_num_active_ltcs_r(void) +{ + return 0x00100ec0U; +} +static inline u32 fb_mmu_num_active_ltcs_count_f(u32 v) +{ + return (v & 0x1fU) << 0U; +} +static inline u32 fb_mmu_num_active_ltcs_count_v(u32 r) +{ + return (r >> 0U) & 0x1fU; +} +static inline u32 fb_mmu_cbc_base_r(void) +{ + return 0x00100ec4U; +} +static inline u32 fb_mmu_cbc_base_address_f(u32 v) +{ + return (v & 0x3ffffffU) << 0U; +} +static inline u32 fb_mmu_cbc_base_address_v(u32 r) +{ + return (r >> 0U) & 0x3ffffffU; +} +static inline u32 fb_mmu_cbc_base_address_alignment_shift_v(void) +{ + return 0x0000000bU; +} +static inline u32 fb_mmu_cbc_top_r(void) +{ + return 0x00100ec8U; +} +static inline u32 fb_mmu_cbc_top_size_f(u32 v) +{ + return (v & 0x7fffU) << 0U; +} +static inline u32 fb_mmu_cbc_top_size_v(u32 r) +{ + return (r >> 0U) & 0x7fffU; +} +static inline u32 fb_mmu_cbc_top_size_alignment_shift_v(void) +{ + return 0x0000000bU; +} +static inline u32 fb_mmu_cbc_max_r(void) +{ + return 0x00100eccU; +} +static inline u32 fb_mmu_cbc_max_comptagline_f(u32 v) +{ + return (v & 0xffffffU) << 0U; +} +static inline u32 fb_mmu_cbc_max_comptagline_v(u32 r) +{ + return (r >> 0U) & 0xffffffU; +} +static inline u32 fb_mmu_cbc_max_safe_f(u32 v) +{ + return (v & 0x1U) << 30U; +} +static inline u32 fb_mmu_cbc_max_safe_true_v(void) +{ + return 0x00000001U; +} +static inline u32 fb_mmu_cbc_max_safe_false_v(void) +{ + return 0x00000000U; +} +static inline u32 fb_mmu_cbc_max_unsafe_fault_f(u32 v) +{ + return (v & 0x1U) << 31U; +} +static inline u32 fb_mmu_cbc_max_unsafe_fault_enabled_v(void) +{ + 
return 0x00000000U; +} +static inline u32 fb_mmu_cbc_max_unsafe_fault_disabled_v(void) +{ + return 0x00000001U; +} +#endif diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_fifo_gv11b.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_fifo_gv11b.h new file mode 100644 index 000000000..59cc7a1d7 --- /dev/null +++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_fifo_gv11b.h @@ -0,0 +1,687 @@ +/* + * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/* + * Function naming determines intended use: + * + * _r(void) : Returns the offset for register . + * + * _o(void) : Returns the offset for element . + * + * _w(void) : Returns the word offset for word (4 byte) element . + * + * __s(void) : Returns size of field of register in bits. + * + * __f(u32 v) : Returns a value based on 'v' which has been shifted + * and masked to place it at field of register . 
This value + * can be |'d with others to produce a full register value for + * register . + * + * __m(void) : Returns a mask for field of register . This + * value can be ~'d and then &'d to clear the value of field for + * register . + * + * ___f(void) : Returns the constant value after being shifted + * to place it at field of register . This value can be |'d + * with others to produce a full register value for . + * + * __v(u32 r) : Returns the value of field from a full register + * value 'r' after being shifted to place its LSB at bit 0. + * This value is suitable for direct comparison with other unshifted + * values appropriate for use in field of register . + * + * ___v(void) : Returns the constant value for defined for + * field of register . This value is suitable for direct + * comparison with unshifted values appropriate for use in field + * of register . + */ +#ifndef _hw_fifo_gv11b_h_ +#define _hw_fifo_gv11b_h_ + +static inline u32 fifo_bar1_base_r(void) +{ + return 0x00002254U; +} +static inline u32 fifo_bar1_base_ptr_f(u32 v) +{ + return (v & 0xfffffffU) << 0U; +} +static inline u32 fifo_bar1_base_ptr_align_shift_v(void) +{ + return 0x0000000cU; +} +static inline u32 fifo_bar1_base_valid_false_f(void) +{ + return 0x0U; +} +static inline u32 fifo_bar1_base_valid_true_f(void) +{ + return 0x10000000U; +} +static inline u32 fifo_userd_writeback_r(void) +{ + return 0x0000225cU; +} +static inline u32 fifo_userd_writeback_timer_f(u32 v) +{ + return (v & 0xffU) << 0U; +} +static inline u32 fifo_userd_writeback_timer_disabled_v(void) +{ + return 0x00000000U; +} +static inline u32 fifo_userd_writeback_timer_shorter_v(void) +{ + return 0x00000003U; +} +static inline u32 fifo_userd_writeback_timer_100us_v(void) +{ + return 0x00000064U; +} +static inline u32 fifo_userd_writeback_timescale_f(u32 v) +{ + return (v & 0xfU) << 12U; +} +static inline u32 fifo_userd_writeback_timescale_0_v(void) +{ + return 0x00000000U; +} +static inline u32 fifo_runlist_base_r(void) 
+{ + return 0x00002270U; +} +static inline u32 fifo_runlist_base_ptr_f(u32 v) +{ + return (v & 0xfffffffU) << 0U; +} +static inline u32 fifo_runlist_base_target_vid_mem_f(void) +{ + return 0x0U; +} +static inline u32 fifo_runlist_base_target_sys_mem_coh_f(void) +{ + return 0x20000000U; +} +static inline u32 fifo_runlist_base_target_sys_mem_ncoh_f(void) +{ + return 0x30000000U; +} +static inline u32 fifo_runlist_r(void) +{ + return 0x00002274U; +} +static inline u32 fifo_runlist_engine_f(u32 v) +{ + return (v & 0xfU) << 20U; +} +static inline u32 fifo_eng_runlist_base_r(u32 i) +{ + return 0x00002280U + i*8U; +} +static inline u32 fifo_eng_runlist_base__size_1_v(void) +{ + return 0x00000002U; +} +static inline u32 fifo_eng_runlist_r(u32 i) +{ + return 0x00002284U + i*8U; +} +static inline u32 fifo_eng_runlist__size_1_v(void) +{ + return 0x00000002U; +} +static inline u32 fifo_eng_runlist_length_f(u32 v) +{ + return (v & 0xffffU) << 0U; +} +static inline u32 fifo_eng_runlist_length_max_v(void) +{ + return 0x0000ffffU; +} +static inline u32 fifo_eng_runlist_pending_true_f(void) +{ + return 0x100000U; +} +static inline u32 fifo_pb_timeslice_r(u32 i) +{ + return 0x00002350U + i*4U; +} +static inline u32 fifo_pb_timeslice_timeout_16_f(void) +{ + return 0x10U; +} +static inline u32 fifo_pb_timeslice_timescale_0_f(void) +{ + return 0x0U; +} +static inline u32 fifo_pb_timeslice_enable_true_f(void) +{ + return 0x10000000U; +} +static inline u32 fifo_pbdma_map_r(u32 i) +{ + return 0x00002390U + i*4U; +} +static inline u32 fifo_intr_0_r(void) +{ + return 0x00002100U; +} +static inline u32 fifo_intr_0_bind_error_pending_f(void) +{ + return 0x1U; +} +static inline u32 fifo_intr_0_bind_error_reset_f(void) +{ + return 0x1U; +} +static inline u32 fifo_intr_0_sched_error_pending_f(void) +{ + return 0x100U; +} +static inline u32 fifo_intr_0_sched_error_reset_f(void) +{ + return 0x100U; +} +static inline u32 fifo_intr_0_chsw_error_pending_f(void) +{ + return 0x10000U; +} +static inline 
u32 fifo_intr_0_chsw_error_reset_f(void) +{ + return 0x10000U; +} +static inline u32 fifo_intr_0_fb_flush_timeout_pending_f(void) +{ + return 0x800000U; +} +static inline u32 fifo_intr_0_fb_flush_timeout_reset_f(void) +{ + return 0x800000U; +} +static inline u32 fifo_intr_0_lb_error_pending_f(void) +{ + return 0x1000000U; +} +static inline u32 fifo_intr_0_lb_error_reset_f(void) +{ + return 0x1000000U; +} +static inline u32 fifo_intr_0_pbdma_intr_pending_f(void) +{ + return 0x20000000U; +} +static inline u32 fifo_intr_0_runlist_event_pending_f(void) +{ + return 0x40000000U; +} +static inline u32 fifo_intr_0_channel_intr_pending_f(void) +{ + return 0x80000000U; +} +static inline u32 fifo_intr_0_ctxsw_timeout_pending_f(void) +{ + return 0x2U; +} +static inline u32 fifo_intr_en_0_r(void) +{ + return 0x00002140U; +} +static inline u32 fifo_intr_en_0_sched_error_f(u32 v) +{ + return (v & 0x1U) << 8U; +} +static inline u32 fifo_intr_en_0_sched_error_m(void) +{ + return 0x1U << 8U; +} +static inline u32 fifo_intr_en_0_ctxsw_timeout_pending_f(void) +{ + return 0x2U; +} +static inline u32 fifo_intr_en_1_r(void) +{ + return 0x00002528U; +} +static inline u32 fifo_intr_bind_error_r(void) +{ + return 0x0000252cU; +} +static inline u32 fifo_intr_sched_error_r(void) +{ + return 0x0000254cU; +} +static inline u32 fifo_intr_sched_error_code_f(u32 v) +{ + return (v & 0xffU) << 0U; +} +static inline u32 fifo_intr_chsw_error_r(void) +{ + return 0x0000256cU; +} +static inline u32 fifo_intr_ctxsw_timeout_r(void) +{ + return 0x00002a30U; +} +static inline u32 fifo_intr_ctxsw_timeout_engine_f(u32 v, u32 i) +{ + return (v & 0x1U) << (0U + i*1U); +} +static inline u32 fifo_intr_ctxsw_timeout_engine_v(u32 r, u32 i) +{ + return (r >> (0U + i*1U)) & 0x1U; +} +static inline u32 fifo_intr_ctxsw_timeout_engine__size_1_v(void) +{ + return 0x00000020U; +} +static inline u32 fifo_intr_ctxsw_timeout_engine_pending_v(void) +{ + return 0x00000001U; +} +static inline u32 
fifo_intr_ctxsw_timeout_engine_pending_f(u32 i) +{ + return 0x1U << (0U + i*1U); +} +static inline u32 fifo_intr_ctxsw_timeout_info_r(u32 i) +{ + return 0x00003200U + i*4U; +} +static inline u32 fifo_intr_ctxsw_timeout_info__size_1_v(void) +{ + return 0x00000004U; +} +static inline u32 fifo_intr_ctxsw_timeout_info_ctxsw_state_v(u32 r) +{ + return (r >> 14U) & 0x3U; +} +static inline u32 fifo_intr_ctxsw_timeout_info_ctxsw_state_load_v(void) +{ + return 0x00000001U; +} +static inline u32 fifo_intr_ctxsw_timeout_info_ctxsw_state_save_v(void) +{ + return 0x00000002U; +} +static inline u32 fifo_intr_ctxsw_timeout_info_ctxsw_state_switch_v(void) +{ + return 0x00000003U; +} +static inline u32 fifo_intr_ctxsw_timeout_info_prev_tsgid_v(u32 r) +{ + return (r >> 0U) & 0x3fffU; +} +static inline u32 fifo_intr_ctxsw_timeout_info_next_tsgid_v(u32 r) +{ + return (r >> 16U) & 0x3fffU; +} +static inline u32 fifo_intr_ctxsw_timeout_info_status_v(u32 r) +{ + return (r >> 30U) & 0x3U; +} +static inline u32 fifo_intr_ctxsw_timeout_info_status_awaiting_ack_v(void) +{ + return 0x00000000U; +} +static inline u32 fifo_intr_ctxsw_timeout_info_status_eng_was_reset_v(void) +{ + return 0x00000001U; +} +static inline u32 fifo_intr_ctxsw_timeout_info_status_ack_received_v(void) +{ + return 0x00000002U; +} +static inline u32 fifo_intr_ctxsw_timeout_info_status_dropped_timeout_v(void) +{ + return 0x00000003U; +} +static inline u32 fifo_intr_pbdma_id_r(void) +{ + return 0x000025a0U; +} +static inline u32 fifo_intr_pbdma_id_status_f(u32 v, u32 i) +{ + return (v & 0x1U) << (0U + i*1U); +} +static inline u32 fifo_intr_pbdma_id_status_v(u32 r, u32 i) +{ + return (r >> (0U + i*1U)) & 0x1U; +} +static inline u32 fifo_intr_pbdma_id_status__size_1_v(void) +{ + return 0x00000003U; +} +static inline u32 fifo_intr_runlist_r(void) +{ + return 0x00002a00U; +} +static inline u32 fifo_fb_timeout_r(void) +{ + return 0x00002a04U; +} +static inline u32 fifo_fb_timeout_period_m(void) +{ + return 0x3fffffffU << 0U; +} 
+static inline u32 fifo_fb_timeout_period_max_f(void) +{ + return 0x3fffffffU; +} +static inline u32 fifo_fb_timeout_period_init_f(void) +{ + return 0x3c00U; +} +static inline u32 fifo_fb_timeout_detection_m(void) +{ + return 0x1U << 31U; +} +static inline u32 fifo_fb_timeout_detection_enabled_f(void) +{ + return 0x80000000U; +} +static inline u32 fifo_fb_timeout_detection_disabled_f(void) +{ + return 0x0U; +} +static inline u32 fifo_sched_disable_r(void) +{ + return 0x00002630U; +} +static inline u32 fifo_sched_disable_runlist_f(u32 v, u32 i) +{ + return (v & 0x1U) << (0U + i*1U); +} +static inline u32 fifo_sched_disable_runlist_m(u32 i) +{ + return 0x1U << (0U + i*1U); +} +static inline u32 fifo_sched_disable_true_v(void) +{ + return 0x00000001U; +} +static inline u32 fifo_runlist_preempt_r(void) +{ + return 0x00002638U; +} +static inline u32 fifo_runlist_preempt_runlist_f(u32 v, u32 i) +{ + return (v & 0x1U) << (0U + i*1U); +} +static inline u32 fifo_runlist_preempt_runlist_m(u32 i) +{ + return 0x1U << (0U + i*1U); +} +static inline u32 fifo_runlist_preempt_runlist_pending_v(void) +{ + return 0x00000001U; +} +static inline u32 fifo_preempt_r(void) +{ + return 0x00002634U; +} +static inline u32 fifo_preempt_pending_true_f(void) +{ + return 0x100000U; +} +static inline u32 fifo_preempt_type_channel_f(void) +{ + return 0x0U; +} +static inline u32 fifo_preempt_type_tsg_f(void) +{ + return 0x1000000U; +} +static inline u32 fifo_preempt_chid_f(u32 v) +{ + return (v & 0xfffU) << 0U; +} +static inline u32 fifo_preempt_id_f(u32 v) +{ + return (v & 0xfffU) << 0U; +} +static inline u32 fifo_engine_status_r(u32 i) +{ + return 0x00002640U + i*8U; +} +static inline u32 fifo_engine_status__size_1_v(void) +{ + return 0x00000004U; +} +static inline u32 fifo_engine_status_id_v(u32 r) +{ + return (r >> 0U) & 0xfffU; +} +static inline u32 fifo_engine_status_id_type_v(u32 r) +{ + return (r >> 12U) & 0x1U; +} +static inline u32 fifo_engine_status_id_type_chid_v(void) +{ + return 
0x00000000U; +} +static inline u32 fifo_engine_status_id_type_tsgid_v(void) +{ + return 0x00000001U; +} +static inline u32 fifo_engine_status_ctx_status_v(u32 r) +{ + return (r >> 13U) & 0x7U; +} +static inline u32 fifo_engine_status_ctx_status_valid_v(void) +{ + return 0x00000001U; +} +static inline u32 fifo_engine_status_ctx_status_ctxsw_load_v(void) +{ + return 0x00000005U; +} +static inline u32 fifo_engine_status_ctx_status_ctxsw_save_v(void) +{ + return 0x00000006U; +} +static inline u32 fifo_engine_status_ctx_status_ctxsw_switch_v(void) +{ + return 0x00000007U; +} +static inline u32 fifo_engine_status_next_id_v(u32 r) +{ + return (r >> 16U) & 0xfffU; +} +static inline u32 fifo_engine_status_next_id_type_v(u32 r) +{ + return (r >> 28U) & 0x1U; +} +static inline u32 fifo_engine_status_next_id_type_chid_v(void) +{ + return 0x00000000U; +} +static inline u32 fifo_engine_status_eng_reload_v(u32 r) +{ + return (r >> 29U) & 0x1U; +} +static inline u32 fifo_engine_status_faulted_v(u32 r) +{ + return (r >> 30U) & 0x1U; +} +static inline u32 fifo_engine_status_faulted_true_v(void) +{ + return 0x00000001U; +} +static inline u32 fifo_engine_status_engine_v(u32 r) +{ + return (r >> 31U) & 0x1U; +} +static inline u32 fifo_engine_status_engine_idle_v(void) +{ + return 0x00000000U; +} +static inline u32 fifo_engine_status_engine_busy_v(void) +{ + return 0x00000001U; +} +static inline u32 fifo_engine_status_ctxsw_v(u32 r) +{ + return (r >> 15U) & 0x1U; +} +static inline u32 fifo_engine_status_ctxsw_in_progress_v(void) +{ + return 0x00000001U; +} +static inline u32 fifo_engine_status_ctxsw_in_progress_f(void) +{ + return 0x8000U; +} +static inline u32 fifo_eng_ctxsw_timeout_r(void) +{ + return 0x00002a0cU; +} +static inline u32 fifo_eng_ctxsw_timeout_period_f(u32 v) +{ + return (v & 0x7fffffffU) << 0U; +} +static inline u32 fifo_eng_ctxsw_timeout_period_m(void) +{ + return 0x7fffffffU << 0U; +} +static inline u32 fifo_eng_ctxsw_timeout_period_v(u32 r) +{ + return (r >> 0U) & 
0x7fffffffU; +} +static inline u32 fifo_eng_ctxsw_timeout_period_init_f(void) +{ + return 0x3fffffU; +} +static inline u32 fifo_eng_ctxsw_timeout_period_max_f(void) +{ + return 0x7fffffffU; +} +static inline u32 fifo_eng_ctxsw_timeout_detection_f(u32 v) +{ + return (v & 0x1U) << 31U; +} +static inline u32 fifo_eng_ctxsw_timeout_detection_m(void) +{ + return 0x1U << 31U; +} +static inline u32 fifo_eng_ctxsw_timeout_detection_enabled_f(void) +{ + return 0x80000000U; +} +static inline u32 fifo_eng_ctxsw_timeout_detection_disabled_f(void) +{ + return 0x0U; +} +static inline u32 fifo_pbdma_status_r(u32 i) +{ + return 0x00003080U + i*4U; +} +static inline u32 fifo_pbdma_status__size_1_v(void) +{ + return 0x00000003U; +} +static inline u32 fifo_pbdma_status_id_v(u32 r) +{ + return (r >> 0U) & 0xfffU; +} +static inline u32 fifo_pbdma_status_id_type_v(u32 r) +{ + return (r >> 12U) & 0x1U; +} +static inline u32 fifo_pbdma_status_id_type_chid_v(void) +{ + return 0x00000000U; +} +static inline u32 fifo_pbdma_status_id_type_tsgid_v(void) +{ + return 0x00000001U; +} +static inline u32 fifo_pbdma_status_chan_status_v(u32 r) +{ + return (r >> 13U) & 0x7U; +} +static inline u32 fifo_pbdma_status_chan_status_valid_v(void) +{ + return 0x00000001U; +} +static inline u32 fifo_pbdma_status_chan_status_chsw_load_v(void) +{ + return 0x00000005U; +} +static inline u32 fifo_pbdma_status_chan_status_chsw_save_v(void) +{ + return 0x00000006U; +} +static inline u32 fifo_pbdma_status_chan_status_chsw_switch_v(void) +{ + return 0x00000007U; +} +static inline u32 fifo_pbdma_status_next_id_v(u32 r) +{ + return (r >> 16U) & 0xfffU; +} +static inline u32 fifo_pbdma_status_next_id_type_v(u32 r) +{ + return (r >> 28U) & 0x1U; +} +static inline u32 fifo_pbdma_status_next_id_type_chid_v(void) +{ + return 0x00000000U; +} +static inline u32 fifo_pbdma_status_chsw_v(u32 r) +{ + return (r >> 15U) & 0x1U; +} +static inline u32 fifo_pbdma_status_chsw_in_progress_v(void) +{ + return 0x00000001U; +} +static 
inline u32 fifo_cfg0_r(void) +{ + return 0x00002004U; +} +static inline u32 fifo_cfg0_num_pbdma_v(u32 r) +{ + return (r >> 0U) & 0xffU; +} +static inline u32 fifo_cfg0_pbdma_fault_id_v(u32 r) +{ + return (r >> 16U) & 0xffU; +} +static inline u32 fifo_fb_iface_r(void) +{ + return 0x000026f0U; +} +static inline u32 fifo_fb_iface_control_v(u32 r) +{ + return (r >> 0U) & 0x1U; +} +static inline u32 fifo_fb_iface_control_enable_f(void) +{ + return 0x1U; +} +static inline u32 fifo_fb_iface_status_v(u32 r) +{ + return (r >> 4U) & 0x1U; +} +static inline u32 fifo_fb_iface_status_enabled_f(void) +{ + return 0x10U; +} +#endif diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_flush_gv11b.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_flush_gv11b.h new file mode 100644 index 000000000..45c01de01 --- /dev/null +++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_flush_gv11b.h @@ -0,0 +1,187 @@ +/* + * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/* + * Function naming determines intended use: + * + * _r(void) : Returns the offset for register . + * + * _o(void) : Returns the offset for element . + * + * _w(void) : Returns the word offset for word (4 byte) element . + * + * __s(void) : Returns size of field of register in bits. + * + * __f(u32 v) : Returns a value based on 'v' which has been shifted + * and masked to place it at field of register . This value + * can be |'d with others to produce a full register value for + * register . + * + * __m(void) : Returns a mask for field of register . This + * value can be ~'d and then &'d to clear the value of field for + * register . + * + * ___f(void) : Returns the constant value after being shifted + * to place it at field of register . This value can be |'d + * with others to produce a full register value for . + * + * __v(u32 r) : Returns the value of field from a full register + * value 'r' after being shifted to place its LSB at bit 0. + * This value is suitable for direct comparison with other unshifted + * values appropriate for use in field of register . + * + * ___v(void) : Returns the constant value for defined for + * field of register . This value is suitable for direct + * comparison with unshifted values appropriate for use in field + * of register . 
+ */ +#ifndef _hw_flush_gv11b_h_ +#define _hw_flush_gv11b_h_ + +static inline u32 flush_l2_system_invalidate_r(void) +{ + return 0x00070004U; +} +static inline u32 flush_l2_system_invalidate_pending_v(u32 r) +{ + return (r >> 0U) & 0x1U; +} +static inline u32 flush_l2_system_invalidate_pending_busy_v(void) +{ + return 0x00000001U; +} +static inline u32 flush_l2_system_invalidate_pending_busy_f(void) +{ + return 0x1U; +} +static inline u32 flush_l2_system_invalidate_outstanding_v(u32 r) +{ + return (r >> 1U) & 0x1U; +} +static inline u32 flush_l2_system_invalidate_outstanding_true_v(void) +{ + return 0x00000001U; +} +static inline u32 flush_l2_flush_dirty_r(void) +{ + return 0x00070010U; +} +static inline u32 flush_l2_flush_dirty_pending_v(u32 r) +{ + return (r >> 0U) & 0x1U; +} +static inline u32 flush_l2_flush_dirty_pending_empty_v(void) +{ + return 0x00000000U; +} +static inline u32 flush_l2_flush_dirty_pending_empty_f(void) +{ + return 0x0U; +} +static inline u32 flush_l2_flush_dirty_pending_busy_v(void) +{ + return 0x00000001U; +} +static inline u32 flush_l2_flush_dirty_pending_busy_f(void) +{ + return 0x1U; +} +static inline u32 flush_l2_flush_dirty_outstanding_v(u32 r) +{ + return (r >> 1U) & 0x1U; +} +static inline u32 flush_l2_flush_dirty_outstanding_false_v(void) +{ + return 0x00000000U; +} +static inline u32 flush_l2_flush_dirty_outstanding_false_f(void) +{ + return 0x0U; +} +static inline u32 flush_l2_flush_dirty_outstanding_true_v(void) +{ + return 0x00000001U; +} +static inline u32 flush_l2_clean_comptags_r(void) +{ + return 0x0007000cU; +} +static inline u32 flush_l2_clean_comptags_pending_v(u32 r) +{ + return (r >> 0U) & 0x1U; +} +static inline u32 flush_l2_clean_comptags_pending_empty_v(void) +{ + return 0x00000000U; +} +static inline u32 flush_l2_clean_comptags_pending_empty_f(void) +{ + return 0x0U; +} +static inline u32 flush_l2_clean_comptags_pending_busy_v(void) +{ + return 0x00000001U; +} +static inline u32 
flush_l2_clean_comptags_pending_busy_f(void) +{ + return 0x1U; +} +static inline u32 flush_l2_clean_comptags_outstanding_v(u32 r) +{ + return (r >> 1U) & 0x1U; +} +static inline u32 flush_l2_clean_comptags_outstanding_false_v(void) +{ + return 0x00000000U; +} +static inline u32 flush_l2_clean_comptags_outstanding_false_f(void) +{ + return 0x0U; +} +static inline u32 flush_l2_clean_comptags_outstanding_true_v(void) +{ + return 0x00000001U; +} +static inline u32 flush_fb_flush_r(void) +{ + return 0x00070000U; +} +static inline u32 flush_fb_flush_pending_v(u32 r) +{ + return (r >> 0U) & 0x1U; +} +static inline u32 flush_fb_flush_pending_busy_v(void) +{ + return 0x00000001U; +} +static inline u32 flush_fb_flush_pending_busy_f(void) +{ + return 0x1U; +} +static inline u32 flush_fb_flush_outstanding_v(u32 r) +{ + return (r >> 1U) & 0x1U; +} +static inline u32 flush_fb_flush_outstanding_true_v(void) +{ + return 0x00000001U; +} +#endif diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_fuse_gv11b.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_fuse_gv11b.h new file mode 100644 index 000000000..f8d9b1962 --- /dev/null +++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_fuse_gv11b.h @@ -0,0 +1,151 @@ +/* + * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/* + * Function naming determines intended use: + * + * _r(void) : Returns the offset for register . + * + * _o(void) : Returns the offset for element . + * + * _w(void) : Returns the word offset for word (4 byte) element . + * + * __s(void) : Returns size of field of register in bits. + * + * __f(u32 v) : Returns a value based on 'v' which has been shifted + * and masked to place it at field of register . This value + * can be |'d with others to produce a full register value for + * register . + * + * __m(void) : Returns a mask for field of register . This + * value can be ~'d and then &'d to clear the value of field for + * register . + * + * ___f(void) : Returns the constant value after being shifted + * to place it at field of register . This value can be |'d + * with others to produce a full register value for . + * + * __v(u32 r) : Returns the value of field from a full register + * value 'r' after being shifted to place its LSB at bit 0. + * This value is suitable for direct comparison with other unshifted + * values appropriate for use in field of register . + * + * ___v(void) : Returns the constant value for defined for + * field of register . This value is suitable for direct + * comparison with unshifted values appropriate for use in field + * of register . 
+ */ +#ifndef _hw_fuse_gv11b_h_ +#define _hw_fuse_gv11b_h_ + +static inline u32 fuse_status_opt_tpc_gpc_r(u32 i) +{ + return 0x00021c38U + i*4U; +} +static inline u32 fuse_ctrl_opt_tpc_gpc_r(u32 i) +{ + return 0x00021838U + i*4U; +} +static inline u32 fuse_ctrl_opt_ram_svop_pdp_r(void) +{ + return 0x00021944U; +} +static inline u32 fuse_ctrl_opt_ram_svop_pdp_data_f(u32 v) +{ + return (v & 0xffU) << 0U; +} +static inline u32 fuse_ctrl_opt_ram_svop_pdp_data_m(void) +{ + return 0xffU << 0U; +} +static inline u32 fuse_ctrl_opt_ram_svop_pdp_data_v(u32 r) +{ + return (r >> 0U) & 0xffU; +} +static inline u32 fuse_ctrl_opt_ram_svop_pdp_override_r(void) +{ + return 0x00021948U; +} +static inline u32 fuse_ctrl_opt_ram_svop_pdp_override_data_f(u32 v) +{ + return (v & 0x1U) << 0U; +} +static inline u32 fuse_ctrl_opt_ram_svop_pdp_override_data_m(void) +{ + return 0x1U << 0U; +} +static inline u32 fuse_ctrl_opt_ram_svop_pdp_override_data_v(u32 r) +{ + return (r >> 0U) & 0x1U; +} +static inline u32 fuse_ctrl_opt_ram_svop_pdp_override_data_yes_f(void) +{ + return 0x1U; +} +static inline u32 fuse_ctrl_opt_ram_svop_pdp_override_data_no_f(void) +{ + return 0x0U; +} +static inline u32 fuse_status_opt_fbio_r(void) +{ + return 0x00021c14U; +} +static inline u32 fuse_status_opt_fbio_data_f(u32 v) +{ + return (v & 0xffffU) << 0U; +} +static inline u32 fuse_status_opt_fbio_data_m(void) +{ + return 0xffffU << 0U; +} +static inline u32 fuse_status_opt_fbio_data_v(u32 r) +{ + return (r >> 0U) & 0xffffU; +} +static inline u32 fuse_status_opt_rop_l2_fbp_r(u32 i) +{ + return 0x00021d70U + i*4U; +} +static inline u32 fuse_status_opt_fbp_r(void) +{ + return 0x00021d38U; +} +static inline u32 fuse_status_opt_fbp_idx_v(u32 r, u32 i) +{ + return (r >> (0U + i*1U)) & 0x1U; +} +static inline u32 fuse_opt_ecc_en_r(void) +{ + return 0x00021228U; +} +static inline u32 fuse_opt_feature_fuses_override_disable_r(void) +{ + return 0x000213f0U; +} +static inline u32 fuse_opt_sec_debug_en_r(void) +{ + return 
0x00021218U; +} +static inline u32 fuse_opt_priv_sec_en_r(void) +{ + return 0x00021434U; +} +#endif diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_gmmu_gv11b.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_gmmu_gv11b.h new file mode 100644 index 000000000..0a442b1ff --- /dev/null +++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_gmmu_gv11b.h @@ -0,0 +1,1495 @@ +/* + * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/* + * Function naming determines intended use: + * + * _r(void) : Returns the offset for register . + * + * _o(void) : Returns the offset for element . + * + * _w(void) : Returns the word offset for word (4 byte) element . + * + * __s(void) : Returns size of field of register in bits. + * + * __f(u32 v) : Returns a value based on 'v' which has been shifted + * and masked to place it at field of register . 
This value + * can be |'d with others to produce a full register value for + * register . + * + * __m(void) : Returns a mask for field of register . This + * value can be ~'d and then &'d to clear the value of field for + * register . + * + * ___f(void) : Returns the constant value after being shifted + * to place it at field of register . This value can be |'d + * with others to produce a full register value for . + * + * __v(u32 r) : Returns the value of field from a full register + * value 'r' after being shifted to place its LSB at bit 0. + * This value is suitable for direct comparison with other unshifted + * values appropriate for use in field of register . + * + * ___v(void) : Returns the constant value for defined for + * field of register . This value is suitable for direct + * comparison with unshifted values appropriate for use in field + * of register . + */ +#ifndef _hw_gmmu_gv11b_h_ +#define _hw_gmmu_gv11b_h_ + +static inline u32 gmmu_new_pde_is_pte_w(void) +{ + return 0U; +} +static inline u32 gmmu_new_pde_is_pte_false_f(void) +{ + return 0x0U; +} +static inline u32 gmmu_new_pde_aperture_w(void) +{ + return 0U; +} +static inline u32 gmmu_new_pde_aperture_invalid_f(void) +{ + return 0x0U; +} +static inline u32 gmmu_new_pde_aperture_video_memory_f(void) +{ + return 0x2U; +} +static inline u32 gmmu_new_pde_aperture_sys_mem_coh_f(void) +{ + return 0x4U; +} +static inline u32 gmmu_new_pde_aperture_sys_mem_ncoh_f(void) +{ + return 0x6U; +} +static inline u32 gmmu_new_pde_address_sys_f(u32 v) +{ + return (v & 0xffffffU) << 8U; +} +static inline u32 gmmu_new_pde_address_sys_w(void) +{ + return 0U; +} +static inline u32 gmmu_new_pde_vol_w(void) +{ + return 0U; +} +static inline u32 gmmu_new_pde_vol_true_f(void) +{ + return 0x8U; +} +static inline u32 gmmu_new_pde_vol_false_f(void) +{ + return 0x0U; +} +static inline u32 gmmu_new_pde_address_shift_v(void) +{ + return 0x0000000cU; +} +static inline u32 gmmu_new_pde__size_v(void) +{ + return 0x00000008U; +} 
+static inline u32 gmmu_new_dual_pde_is_pte_w(void) +{ + return 0U; +} +static inline u32 gmmu_new_dual_pde_is_pte_false_f(void) +{ + return 0x0U; +} +static inline u32 gmmu_new_dual_pde_aperture_big_w(void) +{ + return 0U; +} +static inline u32 gmmu_new_dual_pde_aperture_big_invalid_f(void) +{ + return 0x0U; +} +static inline u32 gmmu_new_dual_pde_aperture_big_video_memory_f(void) +{ + return 0x2U; +} +static inline u32 gmmu_new_dual_pde_aperture_big_sys_mem_coh_f(void) +{ + return 0x4U; +} +static inline u32 gmmu_new_dual_pde_aperture_big_sys_mem_ncoh_f(void) +{ + return 0x6U; +} +static inline u32 gmmu_new_dual_pde_address_big_sys_f(u32 v) +{ + return (v & 0xfffffffU) << 4U; +} +static inline u32 gmmu_new_dual_pde_address_big_sys_w(void) +{ + return 0U; +} +static inline u32 gmmu_new_dual_pde_aperture_small_w(void) +{ + return 2U; +} +static inline u32 gmmu_new_dual_pde_aperture_small_invalid_f(void) +{ + return 0x0U; +} +static inline u32 gmmu_new_dual_pde_aperture_small_video_memory_f(void) +{ + return 0x2U; +} +static inline u32 gmmu_new_dual_pde_aperture_small_sys_mem_coh_f(void) +{ + return 0x4U; +} +static inline u32 gmmu_new_dual_pde_aperture_small_sys_mem_ncoh_f(void) +{ + return 0x6U; +} +static inline u32 gmmu_new_dual_pde_vol_small_w(void) +{ + return 2U; +} +static inline u32 gmmu_new_dual_pde_vol_small_true_f(void) +{ + return 0x8U; +} +static inline u32 gmmu_new_dual_pde_vol_small_false_f(void) +{ + return 0x0U; +} +static inline u32 gmmu_new_dual_pde_vol_big_w(void) +{ + return 0U; +} +static inline u32 gmmu_new_dual_pde_vol_big_true_f(void) +{ + return 0x8U; +} +static inline u32 gmmu_new_dual_pde_vol_big_false_f(void) +{ + return 0x0U; +} +static inline u32 gmmu_new_dual_pde_address_small_sys_f(u32 v) +{ + return (v & 0xffffffU) << 8U; +} +static inline u32 gmmu_new_dual_pde_address_small_sys_w(void) +{ + return 2U; +} +static inline u32 gmmu_new_dual_pde_address_shift_v(void) +{ + return 0x0000000cU; +} +static inline u32 
gmmu_new_dual_pde_address_big_shift_v(void) +{ + return 0x00000008U; +} +static inline u32 gmmu_new_dual_pde__size_v(void) +{ + return 0x00000010U; +} +static inline u32 gmmu_new_pte__size_v(void) +{ + return 0x00000008U; +} +static inline u32 gmmu_new_pte_valid_w(void) +{ + return 0U; +} +static inline u32 gmmu_new_pte_valid_true_f(void) +{ + return 0x1U; +} +static inline u32 gmmu_new_pte_valid_false_f(void) +{ + return 0x0U; +} +static inline u32 gmmu_new_pte_privilege_w(void) +{ + return 0U; +} +static inline u32 gmmu_new_pte_privilege_true_f(void) +{ + return 0x20U; +} +static inline u32 gmmu_new_pte_privilege_false_f(void) +{ + return 0x0U; +} +static inline u32 gmmu_new_pte_address_sys_f(u32 v) +{ + return (v & 0xffffffU) << 8U; +} +static inline u32 gmmu_new_pte_address_sys_w(void) +{ + return 0U; +} +static inline u32 gmmu_new_pte_address_vid_f(u32 v) +{ + return (v & 0xffffffU) << 8U; +} +static inline u32 gmmu_new_pte_address_vid_w(void) +{ + return 0U; +} +static inline u32 gmmu_new_pte_vol_w(void) +{ + return 0U; +} +static inline u32 gmmu_new_pte_vol_true_f(void) +{ + return 0x8U; +} +static inline u32 gmmu_new_pte_vol_false_f(void) +{ + return 0x0U; +} +static inline u32 gmmu_new_pte_aperture_w(void) +{ + return 0U; +} +static inline u32 gmmu_new_pte_aperture_video_memory_f(void) +{ + return 0x0U; +} +static inline u32 gmmu_new_pte_aperture_sys_mem_coh_f(void) +{ + return 0x4U; +} +static inline u32 gmmu_new_pte_aperture_sys_mem_ncoh_f(void) +{ + return 0x6U; +} +static inline u32 gmmu_new_pte_read_only_w(void) +{ + return 0U; +} +static inline u32 gmmu_new_pte_read_only_true_f(void) +{ + return 0x40U; +} +static inline u32 gmmu_new_pte_comptagline_f(u32 v) +{ + return (v & 0x3ffffU) << 4U; +} +static inline u32 gmmu_new_pte_comptagline_w(void) +{ + return 1U; +} +static inline u32 gmmu_new_pte_kind_f(u32 v) +{ + return (v & 0xffU) << 24U; +} +static inline u32 gmmu_new_pte_kind_w(void) +{ + return 1U; +} +static inline u32 
gmmu_new_pte_address_shift_v(void) +{ + return 0x0000000cU; +} +static inline u32 gmmu_pte_kind_f(u32 v) +{ + return (v & 0xffU) << 4U; +} +static inline u32 gmmu_pte_kind_w(void) +{ + return 1U; +} +static inline u32 gmmu_pte_kind_invalid_v(void) +{ + return 0x000000ffU; +} +static inline u32 gmmu_pte_kind_pitch_v(void) +{ + return 0x00000000U; +} +static inline u32 gmmu_pte_kind_z16_v(void) +{ + return 0x00000001U; +} +static inline u32 gmmu_pte_kind_z16_2c_v(void) +{ + return 0x00000002U; +} +static inline u32 gmmu_pte_kind_z16_ms2_2c_v(void) +{ + return 0x00000003U; +} +static inline u32 gmmu_pte_kind_z16_ms4_2c_v(void) +{ + return 0x00000004U; +} +static inline u32 gmmu_pte_kind_z16_ms8_2c_v(void) +{ + return 0x00000005U; +} +static inline u32 gmmu_pte_kind_z16_ms16_2c_v(void) +{ + return 0x00000006U; +} +static inline u32 gmmu_pte_kind_z16_2z_v(void) +{ + return 0x00000007U; +} +static inline u32 gmmu_pte_kind_z16_ms2_2z_v(void) +{ + return 0x00000008U; +} +static inline u32 gmmu_pte_kind_z16_ms4_2z_v(void) +{ + return 0x00000009U; +} +static inline u32 gmmu_pte_kind_z16_ms8_2z_v(void) +{ + return 0x0000000aU; +} +static inline u32 gmmu_pte_kind_z16_ms16_2z_v(void) +{ + return 0x0000000bU; +} +static inline u32 gmmu_pte_kind_z16_2cz_v(void) +{ + return 0x00000036U; +} +static inline u32 gmmu_pte_kind_z16_ms2_2cz_v(void) +{ + return 0x00000037U; +} +static inline u32 gmmu_pte_kind_z16_ms4_2cz_v(void) +{ + return 0x00000038U; +} +static inline u32 gmmu_pte_kind_z16_ms8_2cz_v(void) +{ + return 0x00000039U; +} +static inline u32 gmmu_pte_kind_z16_ms16_2cz_v(void) +{ + return 0x0000005fU; +} +static inline u32 gmmu_pte_kind_s8z24_v(void) +{ + return 0x00000011U; +} +static inline u32 gmmu_pte_kind_s8z24_1z_v(void) +{ + return 0x00000012U; +} +static inline u32 gmmu_pte_kind_s8z24_ms2_1z_v(void) +{ + return 0x00000013U; +} +static inline u32 gmmu_pte_kind_s8z24_ms4_1z_v(void) +{ + return 0x00000014U; +} +static inline u32 gmmu_pte_kind_s8z24_ms8_1z_v(void) +{ + 
return 0x00000015U; +} +static inline u32 gmmu_pte_kind_s8z24_ms16_1z_v(void) +{ + return 0x00000016U; +} +static inline u32 gmmu_pte_kind_s8z24_2cz_v(void) +{ + return 0x00000017U; +} +static inline u32 gmmu_pte_kind_s8z24_ms2_2cz_v(void) +{ + return 0x00000018U; +} +static inline u32 gmmu_pte_kind_s8z24_ms4_2cz_v(void) +{ + return 0x00000019U; +} +static inline u32 gmmu_pte_kind_s8z24_ms8_2cz_v(void) +{ + return 0x0000001aU; +} +static inline u32 gmmu_pte_kind_s8z24_ms16_2cz_v(void) +{ + return 0x0000001bU; +} +static inline u32 gmmu_pte_kind_s8z24_2cs_v(void) +{ + return 0x0000001cU; +} +static inline u32 gmmu_pte_kind_s8z24_ms2_2cs_v(void) +{ + return 0x0000001dU; +} +static inline u32 gmmu_pte_kind_s8z24_ms4_2cs_v(void) +{ + return 0x0000001eU; +} +static inline u32 gmmu_pte_kind_s8z24_ms8_2cs_v(void) +{ + return 0x0000001fU; +} +static inline u32 gmmu_pte_kind_s8z24_ms16_2cs_v(void) +{ + return 0x00000020U; +} +static inline u32 gmmu_pte_kind_s8z24_4cszv_v(void) +{ + return 0x00000021U; +} +static inline u32 gmmu_pte_kind_s8z24_ms2_4cszv_v(void) +{ + return 0x00000022U; +} +static inline u32 gmmu_pte_kind_s8z24_ms4_4cszv_v(void) +{ + return 0x00000023U; +} +static inline u32 gmmu_pte_kind_s8z24_ms8_4cszv_v(void) +{ + return 0x00000024U; +} +static inline u32 gmmu_pte_kind_s8z24_ms16_4cszv_v(void) +{ + return 0x00000025U; +} +static inline u32 gmmu_pte_kind_v8z24_ms4_vc12_v(void) +{ + return 0x00000026U; +} +static inline u32 gmmu_pte_kind_v8z24_ms4_vc4_v(void) +{ + return 0x00000027U; +} +static inline u32 gmmu_pte_kind_v8z24_ms8_vc8_v(void) +{ + return 0x00000028U; +} +static inline u32 gmmu_pte_kind_v8z24_ms8_vc24_v(void) +{ + return 0x00000029U; +} +static inline u32 gmmu_pte_kind_v8z24_ms4_vc12_1zv_v(void) +{ + return 0x0000002eU; +} +static inline u32 gmmu_pte_kind_v8z24_ms4_vc4_1zv_v(void) +{ + return 0x0000002fU; +} +static inline u32 gmmu_pte_kind_v8z24_ms8_vc8_1zv_v(void) +{ + return 0x00000030U; +} +static inline u32 
gmmu_pte_kind_v8z24_ms8_vc24_1zv_v(void) +{ + return 0x00000031U; +} +static inline u32 gmmu_pte_kind_v8z24_ms4_vc12_2cs_v(void) +{ + return 0x00000032U; +} +static inline u32 gmmu_pte_kind_v8z24_ms4_vc4_2cs_v(void) +{ + return 0x00000033U; +} +static inline u32 gmmu_pte_kind_v8z24_ms8_vc8_2cs_v(void) +{ + return 0x00000034U; +} +static inline u32 gmmu_pte_kind_v8z24_ms8_vc24_2cs_v(void) +{ + return 0x00000035U; +} +static inline u32 gmmu_pte_kind_v8z24_ms4_vc12_2czv_v(void) +{ + return 0x0000003aU; +} +static inline u32 gmmu_pte_kind_v8z24_ms4_vc4_2czv_v(void) +{ + return 0x0000003bU; +} +static inline u32 gmmu_pte_kind_v8z24_ms8_vc8_2czv_v(void) +{ + return 0x0000003cU; +} +static inline u32 gmmu_pte_kind_v8z24_ms8_vc24_2czv_v(void) +{ + return 0x0000003dU; +} +static inline u32 gmmu_pte_kind_v8z24_ms4_vc12_2zv_v(void) +{ + return 0x0000003eU; +} +static inline u32 gmmu_pte_kind_v8z24_ms4_vc4_2zv_v(void) +{ + return 0x0000003fU; +} +static inline u32 gmmu_pte_kind_v8z24_ms8_vc8_2zv_v(void) +{ + return 0x00000040U; +} +static inline u32 gmmu_pte_kind_v8z24_ms8_vc24_2zv_v(void) +{ + return 0x00000041U; +} +static inline u32 gmmu_pte_kind_v8z24_ms4_vc12_4cszv_v(void) +{ + return 0x00000042U; +} +static inline u32 gmmu_pte_kind_v8z24_ms4_vc4_4cszv_v(void) +{ + return 0x00000043U; +} +static inline u32 gmmu_pte_kind_v8z24_ms8_vc8_4cszv_v(void) +{ + return 0x00000044U; +} +static inline u32 gmmu_pte_kind_v8z24_ms8_vc24_4cszv_v(void) +{ + return 0x00000045U; +} +static inline u32 gmmu_pte_kind_z24s8_v(void) +{ + return 0x00000046U; +} +static inline u32 gmmu_pte_kind_z24s8_1z_v(void) +{ + return 0x00000047U; +} +static inline u32 gmmu_pte_kind_z24s8_ms2_1z_v(void) +{ + return 0x00000048U; +} +static inline u32 gmmu_pte_kind_z24s8_ms4_1z_v(void) +{ + return 0x00000049U; +} +static inline u32 gmmu_pte_kind_z24s8_ms8_1z_v(void) +{ + return 0x0000004aU; +} +static inline u32 gmmu_pte_kind_z24s8_ms16_1z_v(void) +{ + return 0x0000004bU; +} +static inline u32 
gmmu_pte_kind_z24s8_2cs_v(void) +{ + return 0x0000004cU; +} +static inline u32 gmmu_pte_kind_z24s8_ms2_2cs_v(void) +{ + return 0x0000004dU; +} +static inline u32 gmmu_pte_kind_z24s8_ms4_2cs_v(void) +{ + return 0x0000004eU; +} +static inline u32 gmmu_pte_kind_z24s8_ms8_2cs_v(void) +{ + return 0x0000004fU; +} +static inline u32 gmmu_pte_kind_z24s8_ms16_2cs_v(void) +{ + return 0x00000050U; +} +static inline u32 gmmu_pte_kind_z24s8_2cz_v(void) +{ + return 0x00000051U; +} +static inline u32 gmmu_pte_kind_z24s8_ms2_2cz_v(void) +{ + return 0x00000052U; +} +static inline u32 gmmu_pte_kind_z24s8_ms4_2cz_v(void) +{ + return 0x00000053U; +} +static inline u32 gmmu_pte_kind_z24s8_ms8_2cz_v(void) +{ + return 0x00000054U; +} +static inline u32 gmmu_pte_kind_z24s8_ms16_2cz_v(void) +{ + return 0x00000055U; +} +static inline u32 gmmu_pte_kind_z24s8_4cszv_v(void) +{ + return 0x00000056U; +} +static inline u32 gmmu_pte_kind_z24s8_ms2_4cszv_v(void) +{ + return 0x00000057U; +} +static inline u32 gmmu_pte_kind_z24s8_ms4_4cszv_v(void) +{ + return 0x00000058U; +} +static inline u32 gmmu_pte_kind_z24s8_ms8_4cszv_v(void) +{ + return 0x00000059U; +} +static inline u32 gmmu_pte_kind_z24s8_ms16_4cszv_v(void) +{ + return 0x0000005aU; +} +static inline u32 gmmu_pte_kind_z24v8_ms4_vc12_v(void) +{ + return 0x0000005bU; +} +static inline u32 gmmu_pte_kind_z24v8_ms4_vc4_v(void) +{ + return 0x0000005cU; +} +static inline u32 gmmu_pte_kind_z24v8_ms8_vc8_v(void) +{ + return 0x0000005dU; +} +static inline u32 gmmu_pte_kind_z24v8_ms8_vc24_v(void) +{ + return 0x0000005eU; +} +static inline u32 gmmu_pte_kind_z24v8_ms4_vc12_1zv_v(void) +{ + return 0x00000063U; +} +static inline u32 gmmu_pte_kind_z24v8_ms4_vc4_1zv_v(void) +{ + return 0x00000064U; +} +static inline u32 gmmu_pte_kind_z24v8_ms8_vc8_1zv_v(void) +{ + return 0x00000065U; +} +static inline u32 gmmu_pte_kind_z24v8_ms8_vc24_1zv_v(void) +{ + return 0x00000066U; +} +static inline u32 gmmu_pte_kind_z24v8_ms4_vc12_2cs_v(void) +{ + return 0x00000067U; +} 
+static inline u32 gmmu_pte_kind_z24v8_ms4_vc4_2cs_v(void) +{ + return 0x00000068U; +} +static inline u32 gmmu_pte_kind_z24v8_ms8_vc8_2cs_v(void) +{ + return 0x00000069U; +} +static inline u32 gmmu_pte_kind_z24v8_ms8_vc24_2cs_v(void) +{ + return 0x0000006aU; +} +static inline u32 gmmu_pte_kind_z24v8_ms4_vc12_2czv_v(void) +{ + return 0x0000006fU; +} +static inline u32 gmmu_pte_kind_z24v8_ms4_vc4_2czv_v(void) +{ + return 0x00000070U; +} +static inline u32 gmmu_pte_kind_z24v8_ms8_vc8_2czv_v(void) +{ + return 0x00000071U; +} +static inline u32 gmmu_pte_kind_z24v8_ms8_vc24_2czv_v(void) +{ + return 0x00000072U; +} +static inline u32 gmmu_pte_kind_z24v8_ms4_vc12_2zv_v(void) +{ + return 0x00000073U; +} +static inline u32 gmmu_pte_kind_z24v8_ms4_vc4_2zv_v(void) +{ + return 0x00000074U; +} +static inline u32 gmmu_pte_kind_z24v8_ms8_vc8_2zv_v(void) +{ + return 0x00000075U; +} +static inline u32 gmmu_pte_kind_z24v8_ms8_vc24_2zv_v(void) +{ + return 0x00000076U; +} +static inline u32 gmmu_pte_kind_z24v8_ms4_vc12_4cszv_v(void) +{ + return 0x00000077U; +} +static inline u32 gmmu_pte_kind_z24v8_ms4_vc4_4cszv_v(void) +{ + return 0x00000078U; +} +static inline u32 gmmu_pte_kind_z24v8_ms8_vc8_4cszv_v(void) +{ + return 0x00000079U; +} +static inline u32 gmmu_pte_kind_z24v8_ms8_vc24_4cszv_v(void) +{ + return 0x0000007aU; +} +static inline u32 gmmu_pte_kind_zf32_v(void) +{ + return 0x0000007bU; +} +static inline u32 gmmu_pte_kind_zf32_1z_v(void) +{ + return 0x0000007cU; +} +static inline u32 gmmu_pte_kind_zf32_ms2_1z_v(void) +{ + return 0x0000007dU; +} +static inline u32 gmmu_pte_kind_zf32_ms4_1z_v(void) +{ + return 0x0000007eU; +} +static inline u32 gmmu_pte_kind_zf32_ms8_1z_v(void) +{ + return 0x0000007fU; +} +static inline u32 gmmu_pte_kind_zf32_ms16_1z_v(void) +{ + return 0x00000080U; +} +static inline u32 gmmu_pte_kind_zf32_2cs_v(void) +{ + return 0x00000081U; +} +static inline u32 gmmu_pte_kind_zf32_ms2_2cs_v(void) +{ + return 0x00000082U; +} +static inline u32 
gmmu_pte_kind_zf32_ms4_2cs_v(void) +{ + return 0x00000083U; +} +static inline u32 gmmu_pte_kind_zf32_ms8_2cs_v(void) +{ + return 0x00000084U; +} +static inline u32 gmmu_pte_kind_zf32_ms16_2cs_v(void) +{ + return 0x00000085U; +} +static inline u32 gmmu_pte_kind_zf32_2cz_v(void) +{ + return 0x00000086U; +} +static inline u32 gmmu_pte_kind_zf32_ms2_2cz_v(void) +{ + return 0x00000087U; +} +static inline u32 gmmu_pte_kind_zf32_ms4_2cz_v(void) +{ + return 0x00000088U; +} +static inline u32 gmmu_pte_kind_zf32_ms8_2cz_v(void) +{ + return 0x00000089U; +} +static inline u32 gmmu_pte_kind_zf32_ms16_2cz_v(void) +{ + return 0x0000008aU; +} +static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms4_vc12_v(void) +{ + return 0x0000008bU; +} +static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms4_vc4_v(void) +{ + return 0x0000008cU; +} +static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms8_vc8_v(void) +{ + return 0x0000008dU; +} +static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms8_vc24_v(void) +{ + return 0x0000008eU; +} +static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms4_vc12_1cs_v(void) +{ + return 0x0000008fU; +} +static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms4_vc4_1cs_v(void) +{ + return 0x00000090U; +} +static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms8_vc8_1cs_v(void) +{ + return 0x00000091U; +} +static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms8_vc24_1cs_v(void) +{ + return 0x00000092U; +} +static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms4_vc12_1zv_v(void) +{ + return 0x00000097U; +} +static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms4_vc4_1zv_v(void) +{ + return 0x00000098U; +} +static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms8_vc8_1zv_v(void) +{ + return 0x00000099U; +} +static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms8_vc24_1zv_v(void) +{ + return 0x0000009aU; +} +static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms4_vc12_1czv_v(void) +{ + return 0x0000009bU; +} +static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms4_vc4_1czv_v(void) +{ + return 0x0000009cU; +} +static inline u32 
gmmu_pte_kind_x8z24_x16v8s8_ms8_vc8_1czv_v(void) +{ + return 0x0000009dU; +} +static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms8_vc24_1czv_v(void) +{ + return 0x0000009eU; +} +static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms4_vc12_2cs_v(void) +{ + return 0x0000009fU; +} +static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms4_vc4_2cs_v(void) +{ + return 0x000000a0U; +} +static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms8_vc8_2cs_v(void) +{ + return 0x000000a1U; +} +static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms8_vc24_2cs_v(void) +{ + return 0x000000a2U; +} +static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms4_vc12_2cszv_v(void) +{ + return 0x000000a3U; +} +static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms4_vc4_2cszv_v(void) +{ + return 0x000000a4U; +} +static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms8_vc8_2cszv_v(void) +{ + return 0x000000a5U; +} +static inline u32 gmmu_pte_kind_x8z24_x16v8s8_ms8_vc24_2cszv_v(void) +{ + return 0x000000a6U; +} +static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms4_vc12_v(void) +{ + return 0x000000a7U; +} +static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms4_vc4_v(void) +{ + return 0x000000a8U; +} +static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms8_vc8_v(void) +{ + return 0x000000a9U; +} +static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms8_vc24_v(void) +{ + return 0x000000aaU; +} +static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms4_vc12_1cs_v(void) +{ + return 0x000000abU; +} +static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms4_vc4_1cs_v(void) +{ + return 0x000000acU; +} +static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms8_vc8_1cs_v(void) +{ + return 0x000000adU; +} +static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms8_vc24_1cs_v(void) +{ + return 0x000000aeU; +} +static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms4_vc12_1zv_v(void) +{ + return 0x000000b3U; +} +static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms4_vc4_1zv_v(void) +{ + return 0x000000b4U; +} +static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms8_vc8_1zv_v(void) +{ + return 0x000000b5U; +} +static inline u32 
gmmu_pte_kind_zf32_x16v8s8_ms8_vc24_1zv_v(void) +{ + return 0x000000b6U; +} +static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms4_vc12_1czv_v(void) +{ + return 0x000000b7U; +} +static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms4_vc4_1czv_v(void) +{ + return 0x000000b8U; +} +static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms8_vc8_1czv_v(void) +{ + return 0x000000b9U; +} +static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms8_vc24_1czv_v(void) +{ + return 0x000000baU; +} +static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms4_vc12_2cs_v(void) +{ + return 0x000000bbU; +} +static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms4_vc4_2cs_v(void) +{ + return 0x000000bcU; +} +static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms8_vc8_2cs_v(void) +{ + return 0x000000bdU; +} +static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms8_vc24_2cs_v(void) +{ + return 0x000000beU; +} +static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms4_vc12_2cszv_v(void) +{ + return 0x000000bfU; +} +static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms4_vc4_2cszv_v(void) +{ + return 0x000000c0U; +} +static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms8_vc8_2cszv_v(void) +{ + return 0x000000c1U; +} +static inline u32 gmmu_pte_kind_zf32_x16v8s8_ms8_vc24_2cszv_v(void) +{ + return 0x000000c2U; +} +static inline u32 gmmu_pte_kind_zf32_x24s8_v(void) +{ + return 0x000000c3U; +} +static inline u32 gmmu_pte_kind_zf32_x24s8_1cs_v(void) +{ + return 0x000000c4U; +} +static inline u32 gmmu_pte_kind_zf32_x24s8_ms2_1cs_v(void) +{ + return 0x000000c5U; +} +static inline u32 gmmu_pte_kind_zf32_x24s8_ms4_1cs_v(void) +{ + return 0x000000c6U; +} +static inline u32 gmmu_pte_kind_zf32_x24s8_ms8_1cs_v(void) +{ + return 0x000000c7U; +} +static inline u32 gmmu_pte_kind_zf32_x24s8_ms16_1cs_v(void) +{ + return 0x000000c8U; +} +static inline u32 gmmu_pte_kind_zf32_x24s8_2cszv_v(void) +{ + return 0x000000ceU; +} +static inline u32 gmmu_pte_kind_zf32_x24s8_ms2_2cszv_v(void) +{ + return 0x000000cfU; +} +static inline u32 gmmu_pte_kind_zf32_x24s8_ms4_2cszv_v(void) +{ + return 
0x000000d0U; +} +static inline u32 gmmu_pte_kind_zf32_x24s8_ms8_2cszv_v(void) +{ + return 0x000000d1U; +} +static inline u32 gmmu_pte_kind_zf32_x24s8_ms16_2cszv_v(void) +{ + return 0x000000d2U; +} +static inline u32 gmmu_pte_kind_zf32_x24s8_2cs_v(void) +{ + return 0x000000d3U; +} +static inline u32 gmmu_pte_kind_zf32_x24s8_ms2_2cs_v(void) +{ + return 0x000000d4U; +} +static inline u32 gmmu_pte_kind_zf32_x24s8_ms4_2cs_v(void) +{ + return 0x000000d5U; +} +static inline u32 gmmu_pte_kind_zf32_x24s8_ms8_2cs_v(void) +{ + return 0x000000d6U; +} +static inline u32 gmmu_pte_kind_zf32_x24s8_ms16_2cs_v(void) +{ + return 0x000000d7U; +} +static inline u32 gmmu_pte_kind_generic_16bx2_v(void) +{ + return 0x000000feU; +} +static inline u32 gmmu_pte_kind_c32_2c_v(void) +{ + return 0x000000d8U; +} +static inline u32 gmmu_pte_kind_c32_2cbr_v(void) +{ + return 0x000000d9U; +} +static inline u32 gmmu_pte_kind_c32_2cba_v(void) +{ + return 0x000000daU; +} +static inline u32 gmmu_pte_kind_c32_2cra_v(void) +{ + return 0x000000dbU; +} +static inline u32 gmmu_pte_kind_c32_2bra_v(void) +{ + return 0x000000dcU; +} +static inline u32 gmmu_pte_kind_c32_ms2_2c_v(void) +{ + return 0x000000ddU; +} +static inline u32 gmmu_pte_kind_c32_ms2_2cbr_v(void) +{ + return 0x000000deU; +} +static inline u32 gmmu_pte_kind_c32_ms2_4cbra_v(void) +{ + return 0x000000ccU; +} +static inline u32 gmmu_pte_kind_c32_ms4_2c_v(void) +{ + return 0x000000dfU; +} +static inline u32 gmmu_pte_kind_c32_ms4_2cbr_v(void) +{ + return 0x000000e0U; +} +static inline u32 gmmu_pte_kind_c32_ms4_2cba_v(void) +{ + return 0x000000e1U; +} +static inline u32 gmmu_pte_kind_c32_ms4_2cra_v(void) +{ + return 0x000000e2U; +} +static inline u32 gmmu_pte_kind_c32_ms4_2bra_v(void) +{ + return 0x000000e3U; +} +static inline u32 gmmu_pte_kind_c32_ms4_4cbra_v(void) +{ + return 0x0000002cU; +} +static inline u32 gmmu_pte_kind_c32_ms8_ms16_2c_v(void) +{ + return 0x000000e4U; +} +static inline u32 gmmu_pte_kind_c32_ms8_ms16_2cra_v(void) +{ + return 
0x000000e5U; +} +static inline u32 gmmu_pte_kind_c64_2c_v(void) +{ + return 0x000000e6U; +} +static inline u32 gmmu_pte_kind_c64_2cbr_v(void) +{ + return 0x000000e7U; +} +static inline u32 gmmu_pte_kind_c64_2cba_v(void) +{ + return 0x000000e8U; +} +static inline u32 gmmu_pte_kind_c64_2cra_v(void) +{ + return 0x000000e9U; +} +static inline u32 gmmu_pte_kind_c64_2bra_v(void) +{ + return 0x000000eaU; +} +static inline u32 gmmu_pte_kind_c64_ms2_2c_v(void) +{ + return 0x000000ebU; +} +static inline u32 gmmu_pte_kind_c64_ms2_2cbr_v(void) +{ + return 0x000000ecU; +} +static inline u32 gmmu_pte_kind_c64_ms2_4cbra_v(void) +{ + return 0x000000cdU; +} +static inline u32 gmmu_pte_kind_c64_ms4_2c_v(void) +{ + return 0x000000edU; +} +static inline u32 gmmu_pte_kind_c64_ms4_2cbr_v(void) +{ + return 0x000000eeU; +} +static inline u32 gmmu_pte_kind_c64_ms4_2cba_v(void) +{ + return 0x000000efU; +} +static inline u32 gmmu_pte_kind_c64_ms4_2cra_v(void) +{ + return 0x000000f0U; +} +static inline u32 gmmu_pte_kind_c64_ms4_2bra_v(void) +{ + return 0x000000f1U; +} +static inline u32 gmmu_pte_kind_c64_ms4_4cbra_v(void) +{ + return 0x0000002dU; +} +static inline u32 gmmu_pte_kind_c64_ms8_ms16_2c_v(void) +{ + return 0x000000f2U; +} +static inline u32 gmmu_pte_kind_c64_ms8_ms16_2cra_v(void) +{ + return 0x000000f3U; +} +static inline u32 gmmu_pte_kind_c128_2c_v(void) +{ + return 0x000000f4U; +} +static inline u32 gmmu_pte_kind_c128_2cr_v(void) +{ + return 0x000000f5U; +} +static inline u32 gmmu_pte_kind_c128_ms2_2c_v(void) +{ + return 0x000000f6U; +} +static inline u32 gmmu_pte_kind_c128_ms2_2cr_v(void) +{ + return 0x000000f7U; +} +static inline u32 gmmu_pte_kind_c128_ms4_2c_v(void) +{ + return 0x000000f8U; +} +static inline u32 gmmu_pte_kind_c128_ms4_2cr_v(void) +{ + return 0x000000f9U; +} +static inline u32 gmmu_pte_kind_c128_ms8_ms16_2c_v(void) +{ + return 0x000000faU; +} +static inline u32 gmmu_pte_kind_c128_ms8_ms16_2cr_v(void) +{ + return 0x000000fbU; +} +static inline u32 
gmmu_pte_kind_x8c24_v(void) +{ + return 0x000000fcU; +} +static inline u32 gmmu_pte_kind_pitch_no_swizzle_v(void) +{ + return 0x000000fdU; +} +static inline u32 gmmu_pte_kind_smsked_message_v(void) +{ + return 0x000000caU; +} +static inline u32 gmmu_pte_kind_smhost_message_v(void) +{ + return 0x000000cbU; +} +static inline u32 gmmu_pte_kind_s8_v(void) +{ + return 0x0000002aU; +} +static inline u32 gmmu_pte_kind_s8_2s_v(void) +{ + return 0x0000002bU; +} +static inline u32 gmmu_fault_client_type_gpc_v(void) +{ + return 0x00000000U; +} +static inline u32 gmmu_fault_client_type_hub_v(void) +{ + return 0x00000001U; +} +static inline u32 gmmu_fault_type_unbound_inst_block_v(void) +{ + return 0x00000004U; +} +static inline u32 gmmu_fault_type_pte_v(void) +{ + return 0x00000002U; +} +static inline u32 gmmu_fault_mmu_eng_id_bar2_v(void) +{ + return 0x00000005U; +} +static inline u32 gmmu_fault_mmu_eng_id_physical_v(void) +{ + return 0x0000001fU; +} +static inline u32 gmmu_fault_mmu_eng_id_ce0_v(void) +{ + return 0x0000000fU; +} +static inline u32 gmmu_fault_buf_size_v(void) +{ + return 0x00000020U; +} +static inline u32 gmmu_fault_buf_entry_inst_aperture_v(u32 r) +{ + return (r >> 8U) & 0x3U; +} +static inline u32 gmmu_fault_buf_entry_inst_aperture_w(void) +{ + return 0U; +} +static inline u32 gmmu_fault_buf_entry_inst_aperture_vid_mem_v(void) +{ + return 0x00000000U; +} +static inline u32 gmmu_fault_buf_entry_inst_aperture_sys_coh_v(void) +{ + return 0x00000002U; +} +static inline u32 gmmu_fault_buf_entry_inst_aperture_sys_nocoh_v(void) +{ + return 0x00000003U; +} +static inline u32 gmmu_fault_buf_entry_inst_lo_f(u32 v) +{ + return (v & 0xfffffU) << 12U; +} +static inline u32 gmmu_fault_buf_entry_inst_lo_v(u32 r) +{ + return (r >> 12U) & 0xfffffU; +} +static inline u32 gmmu_fault_buf_entry_inst_lo_w(void) +{ + return 0U; +} +static inline u32 gmmu_fault_buf_entry_inst_hi_v(u32 r) +{ + return (r >> 0U) & 0xffffffffU; +} +static inline u32 
gmmu_fault_buf_entry_inst_hi_w(void) +{ + return 1U; +} +static inline u32 gmmu_fault_buf_entry_addr_phys_aperture_v(u32 r) +{ + return (r >> 0U) & 0x3U; +} +static inline u32 gmmu_fault_buf_entry_addr_phys_aperture_w(void) +{ + return 2U; +} +static inline u32 gmmu_fault_buf_entry_addr_lo_f(u32 v) +{ + return (v & 0xfffffU) << 12U; +} +static inline u32 gmmu_fault_buf_entry_addr_lo_v(u32 r) +{ + return (r >> 12U) & 0xfffffU; +} +static inline u32 gmmu_fault_buf_entry_addr_lo_w(void) +{ + return 2U; +} +static inline u32 gmmu_fault_buf_entry_addr_hi_v(u32 r) +{ + return (r >> 0U) & 0xffffffffU; +} +static inline u32 gmmu_fault_buf_entry_addr_hi_w(void) +{ + return 3U; +} +static inline u32 gmmu_fault_buf_entry_timestamp_lo_v(u32 r) +{ + return (r >> 0U) & 0xffffffffU; +} +static inline u32 gmmu_fault_buf_entry_timestamp_lo_w(void) +{ + return 4U; +} +static inline u32 gmmu_fault_buf_entry_timestamp_hi_v(u32 r) +{ + return (r >> 0U) & 0xffffffffU; +} +static inline u32 gmmu_fault_buf_entry_timestamp_hi_w(void) +{ + return 5U; +} +static inline u32 gmmu_fault_buf_entry_engine_id_v(u32 r) +{ + return (r >> 0U) & 0x1ffU; +} +static inline u32 gmmu_fault_buf_entry_engine_id_w(void) +{ + return 6U; +} +static inline u32 gmmu_fault_buf_entry_fault_type_v(u32 r) +{ + return (r >> 0U) & 0x1fU; +} +static inline u32 gmmu_fault_buf_entry_fault_type_w(void) +{ + return 7U; +} +static inline u32 gmmu_fault_buf_entry_replayable_fault_v(u32 r) +{ + return (r >> 7U) & 0x1U; +} +static inline u32 gmmu_fault_buf_entry_replayable_fault_w(void) +{ + return 7U; +} +static inline u32 gmmu_fault_buf_entry_replayable_fault_true_v(void) +{ + return 0x00000001U; +} +static inline u32 gmmu_fault_buf_entry_replayable_fault_true_f(void) +{ + return 0x80U; +} +static inline u32 gmmu_fault_buf_entry_client_v(u32 r) +{ + return (r >> 8U) & 0x7fU; +} +static inline u32 gmmu_fault_buf_entry_client_w(void) +{ + return 7U; +} +static inline u32 gmmu_fault_buf_entry_access_type_v(u32 r) +{ + return (r 
>> 16U) & 0xfU; +} +static inline u32 gmmu_fault_buf_entry_access_type_w(void) +{ + return 7U; +} +static inline u32 gmmu_fault_buf_entry_mmu_client_type_v(u32 r) +{ + return (r >> 20U) & 0x1U; +} +static inline u32 gmmu_fault_buf_entry_mmu_client_type_w(void) +{ + return 7U; +} +static inline u32 gmmu_fault_buf_entry_gpc_id_v(u32 r) +{ + return (r >> 24U) & 0x1fU; +} +static inline u32 gmmu_fault_buf_entry_gpc_id_w(void) +{ + return 7U; +} +static inline u32 gmmu_fault_buf_entry_protected_mode_v(u32 r) +{ + return (r >> 29U) & 0x1U; +} +static inline u32 gmmu_fault_buf_entry_protected_mode_w(void) +{ + return 7U; +} +static inline u32 gmmu_fault_buf_entry_protected_mode_true_v(void) +{ + return 0x00000001U; +} +static inline u32 gmmu_fault_buf_entry_protected_mode_true_f(void) +{ + return 0x20000000U; +} +static inline u32 gmmu_fault_buf_entry_replayable_fault_en_v(u32 r) +{ + return (r >> 30U) & 0x1U; +} +static inline u32 gmmu_fault_buf_entry_replayable_fault_en_w(void) +{ + return 7U; +} +static inline u32 gmmu_fault_buf_entry_replayable_fault_en_true_v(void) +{ + return 0x00000001U; +} +static inline u32 gmmu_fault_buf_entry_replayable_fault_en_true_f(void) +{ + return 0x40000000U; +} +static inline u32 gmmu_fault_buf_entry_valid_m(void) +{ + return 0x1U << 31U; +} +static inline u32 gmmu_fault_buf_entry_valid_v(u32 r) +{ + return (r >> 31U) & 0x1U; +} +static inline u32 gmmu_fault_buf_entry_valid_w(void) +{ + return 7U; +} +static inline u32 gmmu_fault_buf_entry_valid_true_v(void) +{ + return 0x00000001U; +} +static inline u32 gmmu_fault_buf_entry_valid_true_f(void) +{ + return 0x80000000U; +} +#endif diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_gr_gv11b.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_gr_gv11b.h new file mode 100644 index 000000000..692b7ba32 --- /dev/null +++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_gr_gv11b.h @@ -0,0 +1,4939 @@ +/* + * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/* + * Function naming determines intended use: + * + * _r(void) : Returns the offset for register . + * + * _o(void) : Returns the offset for element . + * + * _w(void) : Returns the word offset for word (4 byte) element . + * + * __s(void) : Returns size of field of register in bits. + * + * __f(u32 v) : Returns a value based on 'v' which has been shifted + * and masked to place it at field of register . This value + * can be |'d with others to produce a full register value for + * register . + * + * __m(void) : Returns a mask for field of register . This + * value can be ~'d and then &'d to clear the value of field for + * register . + * + * ___f(void) : Returns the constant value after being shifted + * to place it at field of register . This value can be |'d + * with others to produce a full register value for . 
+ * + * __v(u32 r) : Returns the value of field from a full register + * value 'r' after being shifted to place its LSB at bit 0. + * This value is suitable for direct comparison with other unshifted + * values appropriate for use in field of register . + * + * ___v(void) : Returns the constant value for defined for + * field of register . This value is suitable for direct + * comparison with unshifted values appropriate for use in field + * of register . + */ +#ifndef _hw_gr_gv11b_h_ +#define _hw_gr_gv11b_h_ + +static inline u32 gr_intr_r(void) +{ + return 0x00400100U; +} +static inline u32 gr_intr_notify_pending_f(void) +{ + return 0x1U; +} +static inline u32 gr_intr_notify_reset_f(void) +{ + return 0x1U; +} +static inline u32 gr_intr_semaphore_pending_f(void) +{ + return 0x2U; +} +static inline u32 gr_intr_semaphore_reset_f(void) +{ + return 0x2U; +} +static inline u32 gr_intr_illegal_method_pending_f(void) +{ + return 0x10U; +} +static inline u32 gr_intr_illegal_method_reset_f(void) +{ + return 0x10U; +} +static inline u32 gr_intr_illegal_notify_pending_f(void) +{ + return 0x40U; +} +static inline u32 gr_intr_illegal_notify_reset_f(void) +{ + return 0x40U; +} +static inline u32 gr_intr_firmware_method_f(u32 v) +{ + return (v & 0x1U) << 8U; +} +static inline u32 gr_intr_firmware_method_pending_f(void) +{ + return 0x100U; +} +static inline u32 gr_intr_firmware_method_reset_f(void) +{ + return 0x100U; +} +static inline u32 gr_intr_illegal_class_pending_f(void) +{ + return 0x20U; +} +static inline u32 gr_intr_illegal_class_reset_f(void) +{ + return 0x20U; +} +static inline u32 gr_intr_fecs_error_pending_f(void) +{ + return 0x80000U; +} +static inline u32 gr_intr_fecs_error_reset_f(void) +{ + return 0x80000U; +} +static inline u32 gr_intr_class_error_pending_f(void) +{ + return 0x100000U; +} +static inline u32 gr_intr_class_error_reset_f(void) +{ + return 0x100000U; +} +static inline u32 gr_intr_exception_pending_f(void) +{ + return 0x200000U; +} +static inline u32 
gr_intr_exception_reset_f(void) +{ + return 0x200000U; +} +static inline u32 gr_fecs_intr_r(void) +{ + return 0x00400144U; +} +static inline u32 gr_class_error_r(void) +{ + return 0x00400110U; +} +static inline u32 gr_class_error_code_v(u32 r) +{ + return (r >> 0U) & 0xffffU; +} +static inline u32 gr_intr_nonstall_r(void) +{ + return 0x00400120U; +} +static inline u32 gr_intr_nonstall_trap_pending_f(void) +{ + return 0x2U; +} +static inline u32 gr_intr_en_r(void) +{ + return 0x0040013cU; +} +static inline u32 gr_exception_r(void) +{ + return 0x00400108U; +} +static inline u32 gr_exception_fe_m(void) +{ + return 0x1U << 0U; +} +static inline u32 gr_exception_gpc_m(void) +{ + return 0x1U << 24U; +} +static inline u32 gr_exception_memfmt_m(void) +{ + return 0x1U << 1U; +} +static inline u32 gr_exception_ds_m(void) +{ + return 0x1U << 4U; +} +static inline u32 gr_exception_sked_m(void) +{ + return 0x1U << 8U; +} +static inline u32 gr_exception1_r(void) +{ + return 0x00400118U; +} +static inline u32 gr_exception1_gpc_0_pending_f(void) +{ + return 0x1U; +} +static inline u32 gr_exception2_r(void) +{ + return 0x0040011cU; +} +static inline u32 gr_exception_en_r(void) +{ + return 0x00400138U; +} +static inline u32 gr_exception_en_fe_m(void) +{ + return 0x1U << 0U; +} +static inline u32 gr_exception_en_fe_enabled_f(void) +{ + return 0x1U; +} +static inline u32 gr_exception_en_gpc_m(void) +{ + return 0x1U << 24U; +} +static inline u32 gr_exception_en_gpc_enabled_f(void) +{ + return 0x1000000U; +} +static inline u32 gr_exception_en_memfmt_m(void) +{ + return 0x1U << 1U; +} +static inline u32 gr_exception_en_memfmt_enabled_f(void) +{ + return 0x2U; +} +static inline u32 gr_exception_en_ds_m(void) +{ + return 0x1U << 4U; +} +static inline u32 gr_exception_en_ds_enabled_f(void) +{ + return 0x10U; +} +static inline u32 gr_exception1_en_r(void) +{ + return 0x00400130U; +} +static inline u32 gr_exception2_en_r(void) +{ + return 0x00400134U; +} +static inline u32 
gr_gpfifo_ctl_r(void) +{ + return 0x00400500U; +} +static inline u32 gr_gpfifo_ctl_access_f(u32 v) +{ + return (v & 0x1U) << 0U; +} +static inline u32 gr_gpfifo_ctl_access_disabled_f(void) +{ + return 0x0U; +} +static inline u32 gr_gpfifo_ctl_access_enabled_f(void) +{ + return 0x1U; +} +static inline u32 gr_gpfifo_ctl_semaphore_access_f(u32 v) +{ + return (v & 0x1U) << 16U; +} +static inline u32 gr_gpfifo_ctl_semaphore_access_enabled_v(void) +{ + return 0x00000001U; +} +static inline u32 gr_gpfifo_ctl_semaphore_access_enabled_f(void) +{ + return 0x10000U; +} +static inline u32 gr_gpfifo_status_r(void) +{ + return 0x00400504U; +} +static inline u32 gr_trapped_addr_r(void) +{ + return 0x00400704U; +} +static inline u32 gr_trapped_addr_mthd_v(u32 r) +{ + return (r >> 2U) & 0xfffU; +} +static inline u32 gr_trapped_addr_subch_v(u32 r) +{ + return (r >> 16U) & 0x7U; +} +static inline u32 gr_trapped_addr_mme_generated_v(u32 r) +{ + return (r >> 20U) & 0x1U; +} +static inline u32 gr_trapped_addr_datahigh_v(u32 r) +{ + return (r >> 24U) & 0x1U; +} +static inline u32 gr_trapped_addr_priv_v(u32 r) +{ + return (r >> 28U) & 0x1U; +} +static inline u32 gr_trapped_addr_status_v(u32 r) +{ + return (r >> 31U) & 0x1U; +} +static inline u32 gr_trapped_data_lo_r(void) +{ + return 0x00400708U; +} +static inline u32 gr_trapped_data_hi_r(void) +{ + return 0x0040070cU; +} +static inline u32 gr_trapped_data_mme_r(void) +{ + return 0x00400710U; +} +static inline u32 gr_trapped_data_mme_pc_v(u32 r) +{ + return (r >> 0U) & 0xfffU; +} +static inline u32 gr_status_r(void) +{ + return 0x00400700U; +} +static inline u32 gr_status_fe_method_upper_v(u32 r) +{ + return (r >> 1U) & 0x1U; +} +static inline u32 gr_status_fe_method_lower_v(u32 r) +{ + return (r >> 2U) & 0x1U; +} +static inline u32 gr_status_fe_method_lower_idle_v(void) +{ + return 0x00000000U; +} +static inline u32 gr_status_fe_gi_v(u32 r) +{ + return (r >> 21U) & 0x1U; +} +static inline u32 gr_status_mask_r(void) +{ + return 
0x00400610U; +} +static inline u32 gr_status_1_r(void) +{ + return 0x00400604U; +} +static inline u32 gr_status_2_r(void) +{ + return 0x00400608U; +} +static inline u32 gr_engine_status_r(void) +{ + return 0x0040060cU; +} +static inline u32 gr_engine_status_value_busy_f(void) +{ + return 0x1U; +} +static inline u32 gr_pri_be0_becs_be_exception_r(void) +{ + return 0x00410204U; +} +static inline u32 gr_pri_be0_becs_be_exception_en_r(void) +{ + return 0x00410208U; +} +static inline u32 gr_pri_gpc0_gpccs_gpc_exception_r(void) +{ + return 0x00502c90U; +} +static inline u32 gr_pri_gpc0_gpccs_gpc_exception_en_r(void) +{ + return 0x00502c94U; +} +static inline u32 gr_pri_gpc0_tpc0_tpccs_tpc_exception_r(void) +{ + return 0x00504508U; +} +static inline u32 gr_pri_gpc0_tpc0_tpccs_tpc_exception_en_r(void) +{ + return 0x0050450cU; +} +static inline u32 gr_activity_0_r(void) +{ + return 0x00400380U; +} +static inline u32 gr_activity_1_r(void) +{ + return 0x00400384U; +} +static inline u32 gr_activity_2_r(void) +{ + return 0x00400388U; +} +static inline u32 gr_activity_4_r(void) +{ + return 0x00400390U; +} +static inline u32 gr_activity_4_gpc0_s(void) +{ + return 3U; +} +static inline u32 gr_activity_4_gpc0_f(u32 v) +{ + return (v & 0x7U) << 0U; +} +static inline u32 gr_activity_4_gpc0_m(void) +{ + return 0x7U << 0U; +} +static inline u32 gr_activity_4_gpc0_v(u32 r) +{ + return (r >> 0U) & 0x7U; +} +static inline u32 gr_activity_4_gpc0_empty_v(void) +{ + return 0x00000000U; +} +static inline u32 gr_activity_4_gpc0_preempted_v(void) +{ + return 0x00000004U; +} +static inline u32 gr_pri_gpc0_gcc_dbg_r(void) +{ + return 0x00501000U; +} +static inline u32 gr_pri_gpcs_gcc_dbg_r(void) +{ + return 0x00419000U; +} +static inline u32 gr_pri_gpcs_gcc_dbg_invalidate_m(void) +{ + return 0x1U << 1U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_cache_control_r(void) +{ + return 0x0050433cU; +} +static inline u32 gr_pri_gpcs_tpcs_sm_cache_control_r(void) +{ + return 0x00419b3cU; +} +static inline 
u32 gr_pri_gpcs_tpcs_sm_cache_control_invalidate_cache_m(void) +{ + return 0x1U << 0U; +} +static inline u32 gr_pri_sked_activity_r(void) +{ + return 0x00407054U; +} +static inline u32 gr_pri_gpc0_gpccs_gpc_activity0_r(void) +{ + return 0x00502c80U; +} +static inline u32 gr_pri_gpc0_gpccs_gpc_activity1_r(void) +{ + return 0x00502c84U; +} +static inline u32 gr_pri_gpc0_gpccs_gpc_activity2_r(void) +{ + return 0x00502c88U; +} +static inline u32 gr_pri_gpc0_gpccs_gpc_activity3_r(void) +{ + return 0x00502c8cU; +} +static inline u32 gr_pri_gpc0_tpc0_tpccs_tpc_activity_0_r(void) +{ + return 0x00504500U; +} +static inline u32 gr_pri_gpc0_tpc1_tpccs_tpc_activity_0_r(void) +{ + return 0x00504d00U; +} +static inline u32 gr_pri_gpc0_tpcs_tpccs_tpc_activity_0_r(void) +{ + return 0x00501d00U; +} +static inline u32 gr_pri_gpcs_gpccs_gpc_activity_0_r(void) +{ + return 0x0041ac80U; +} +static inline u32 gr_pri_gpcs_gpccs_gpc_activity_1_r(void) +{ + return 0x0041ac84U; +} +static inline u32 gr_pri_gpcs_gpccs_gpc_activity_2_r(void) +{ + return 0x0041ac88U; +} +static inline u32 gr_pri_gpcs_gpccs_gpc_activity_3_r(void) +{ + return 0x0041ac8cU; +} +static inline u32 gr_pri_gpcs_tpc0_tpccs_tpc_activity_0_r(void) +{ + return 0x0041c500U; +} +static inline u32 gr_pri_gpcs_tpc1_tpccs_tpc_activity_0_r(void) +{ + return 0x0041cd00U; +} +static inline u32 gr_pri_gpcs_tpcs_tpccs_tpc_activity_0_r(void) +{ + return 0x00419d00U; +} +static inline u32 gr_pri_be0_becs_be_activity0_r(void) +{ + return 0x00410200U; +} +static inline u32 gr_pri_be1_becs_be_activity0_r(void) +{ + return 0x00410600U; +} +static inline u32 gr_pri_bes_becs_be_activity0_r(void) +{ + return 0x00408a00U; +} +static inline u32 gr_pri_ds_mpipe_status_r(void) +{ + return 0x00405858U; +} +static inline u32 gr_pri_fe_go_idle_info_r(void) +{ + return 0x00404194U; +} +static inline u32 gr_pri_fe_chip_def_info_r(void) +{ + return 0x00404030U; +} +static inline u32 gr_pri_fe_chip_def_info_max_veid_count_v(u32 r) +{ + return (r >> 0U) 
& 0xfffU; +} +static inline u32 gr_pri_fe_chip_def_info_max_veid_count_init_v(void) +{ + return 0x00000040U; +} +static inline u32 gr_pri_gpc0_tpc0_tex_m_tex_subunits_status_r(void) +{ + return 0x00504238U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_r(void) +{ + return 0x00504358U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_qrfdp0_m(void) +{ + return 0x1U << 0U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_qrfdp1_m(void) +{ + return 0x1U << 1U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_qrfdp2_m(void) +{ + return 0x1U << 2U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_qrfdp3_m(void) +{ + return 0x1U << 3U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_qrfdp4_m(void) +{ + return 0x1U << 4U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_qrfdp5_m(void) +{ + return 0x1U << 5U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_qrfdp6_m(void) +{ + return 0x1U << 6U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_qrfdp7_m(void) +{ + return 0x1U << 7U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_qrfdp0_m(void) +{ + return 0x1U << 8U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_qrfdp1_m(void) +{ + return 0x1U << 9U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_qrfdp2_m(void) +{ + return 0x1U << 10U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_qrfdp3_m(void) +{ + return 0x1U << 11U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_qrfdp4_m(void) +{ + return 0x1U << 12U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_qrfdp5_m(void) +{ + return 0x1U << 13U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_qrfdp6_m(void) +{ + return 0x1U << 14U; +} +static inline u32 
gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_qrfdp7_m(void) +{ + return 0x1U << 15U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_corrected_err_total_counter_overflow_v(u32 r) +{ + return (r >> 24U) & 0x1U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_uncorrected_err_total_counter_overflow_v(u32 r) +{ + return (r >> 26U) & 0x1U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_status_reset_task_f(void) +{ + return 0x40000000U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_corrected_err_count_r(void) +{ + return 0x0050435cU; +} +static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_corrected_err_count_total_s(void) +{ + return 16U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_corrected_err_count_total_v(u32 r) +{ + return (r >> 0U) & 0xffffU; +} +static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_uncorrected_err_count_r(void) +{ + return 0x00504360U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_uncorrected_err_count_total_s(void) +{ + return 16U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_lrf_ecc_uncorrected_err_count_total_v(u32 r) +{ + return (r >> 0U) & 0xffffU; +} +static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_r(void) +{ + return 0x0050436cU; +} +static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_corrected_err_el1_0_m(void) +{ + return 0x1U << 0U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_corrected_err_el1_1_m(void) +{ + return 0x1U << 1U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_uncorrected_err_el1_0_m(void) +{ + return 0x1U << 2U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_uncorrected_err_el1_1_m(void) +{ + return 0x1U << 3U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_corrected_err_total_counter_overflow_v(u32 r) +{ + return (r >> 8U) & 0x1U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_uncorrected_err_total_counter_overflow_v(u32 r) +{ + return (r >> 10U) & 0x1U; +} +static inline u32 
gr_pri_gpc0_tpc0_sm_l1_data_ecc_status_reset_task_f(void) +{ + return 0x40000000U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_corrected_err_count_r(void) +{ + return 0x00504370U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_corrected_err_count_total_s(void) +{ + return 16U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_corrected_err_count_total_v(u32 r) +{ + return (r >> 0U) & 0xffffU; +} +static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_uncorrected_err_count_r(void) +{ + return 0x00504374U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_uncorrected_err_count_total_s(void) +{ + return 16U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_l1_data_ecc_uncorrected_err_count_total_v(u32 r) +{ + return (r >> 0U) & 0xffffU; +} +static inline u32 gr_pri_gpc0_tpc0_sm_icache_ecc_status_r(void) +{ + return 0x0050464cU; +} +static inline u32 gr_pri_gpc0_tpc0_sm_icache_ecc_status_corrected_err_l0_data_m(void) +{ + return 0x1U << 0U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_icache_ecc_status_corrected_err_l0_predecode_m(void) +{ + return 0x1U << 1U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_icache_ecc_status_corrected_err_l1_data_m(void) +{ + return 0x1U << 2U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_icache_ecc_status_corrected_err_l1_predecode_m(void) +{ + return 0x1U << 3U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_icache_ecc_status_uncorrected_err_l0_data_m(void) +{ + return 0x1U << 4U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_icache_ecc_status_uncorrected_err_l0_predecode_m(void) +{ + return 0x1U << 5U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_icache_ecc_status_uncorrected_err_l1_data_m(void) +{ + return 0x1U << 6U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_icache_ecc_status_uncorrected_err_l1_predecode_m(void) +{ + return 0x1U << 7U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_icache_ecc_status_corrected_err_total_counter_overflow_v(u32 r) +{ + return (r >> 16U) & 0x1U; +} +static inline u32 
gr_pri_gpc0_tpc0_sm_icache_ecc_status_uncorrected_err_total_counter_overflow_v(u32 r) +{ + return (r >> 18U) & 0x1U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_icache_ecc_status_reset_task_f(void) +{ + return 0x40000000U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_icache_ecc_corrected_err_count_r(void) +{ + return 0x00504650U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_icache_ecc_corrected_err_count_total_s(void) +{ + return 16U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_icache_ecc_corrected_err_count_total_v(u32 r) +{ + return (r >> 0U) & 0xffffU; +} +static inline u32 gr_pri_gpc0_tpc0_sm_icache_ecc_uncorrected_err_count_r(void) +{ + return 0x00504654U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_icache_ecc_uncorrected_err_count_total_s(void) +{ + return 16U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_icache_ecc_uncorrected_err_count_total_v(u32 r) +{ + return (r >> 0U) & 0xffffU; +} +static inline u32 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_r(void) +{ + return 0x00504624U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_corrected_err_el1_0_m(void) +{ + return 0x1U << 0U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_corrected_err_el1_1_m(void) +{ + return 0x1U << 1U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_uncorrected_err_el1_0_m(void) +{ + return 0x1U << 2U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_uncorrected_err_el1_1_m(void) +{ + return 0x1U << 3U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_corrected_err_pixrpf_m(void) +{ + return 0x1U << 4U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_corrected_err_miss_fifo_m(void) +{ + return 0x1U << 5U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_uncorrected_err_pixrpf_m(void) +{ + return 0x1U << 6U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_uncorrected_err_miss_fifo_m(void) +{ + return 0x1U << 7U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_corrected_err_total_counter_overflow_v(u32 
r) +{ + return (r >> 8U) & 0x1U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_uncorrected_err_total_counter_overflow_v(u32 r) +{ + return (r >> 10U) & 0x1U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_status_reset_task_f(void) +{ + return 0x40000000U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_corrected_err_count_r(void) +{ + return 0x00504628U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_corrected_err_count_total_s(void) +{ + return 16U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_corrected_err_count_total_v(u32 r) +{ + return (r >> 0U) & 0xffffU; +} +static inline u32 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_uncorrected_err_count_r(void) +{ + return 0x0050462cU; +} +static inline u32 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_uncorrected_err_count_total_s(void) +{ + return 16U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_l1_tag_ecc_uncorrected_err_count_total_v(u32 r) +{ + return (r >> 0U) & 0xffffU; +} +static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_r(void) +{ + return 0x00504638U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_corrected_err_warp_sm0_m(void) +{ + return 0x1U << 0U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_corrected_err_warp_sm1_m(void) +{ + return 0x1U << 1U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_corrected_err_barrier_sm0_m(void) +{ + return 0x1U << 2U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_corrected_err_barrier_sm1_m(void) +{ + return 0x1U << 3U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_uncorrected_err_warp_sm0_m(void) +{ + return 0x1U << 4U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_uncorrected_err_warp_sm1_m(void) +{ + return 0x1U << 5U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_uncorrected_err_barrier_sm0_m(void) +{ + return 0x1U << 6U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_uncorrected_err_barrier_sm1_m(void) +{ + return 0x1U << 7U; +} +static inline u32 
gr_pri_gpc0_tpc0_sm_cbu_ecc_status_corrected_err_total_counter_overflow_v(u32 r) +{ + return (r >> 16U) & 0x1U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_uncorrected_err_total_counter_overflow_v(u32 r) +{ + return (r >> 18U) & 0x1U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_status_reset_task_f(void) +{ + return 0x40000000U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_corrected_err_count_r(void) +{ + return 0x0050463cU; +} +static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_corrected_err_count_total_s(void) +{ + return 16U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_corrected_err_count_total_v(u32 r) +{ + return (r >> 0U) & 0xffffU; +} +static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_uncorrected_err_count_r(void) +{ + return 0x00504640U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_uncorrected_err_count_total_s(void) +{ + return 16U; +} +static inline u32 gr_pri_gpc0_tpc0_sm_cbu_ecc_uncorrected_err_count_total_v(u32 r) +{ + return (r >> 0U) & 0xffffU; +} +static inline u32 gr_pri_gpc0_tpc0_tex_m_routing_r(void) +{ + return 0x005042c4U; +} +static inline u32 gr_pri_gpc0_tpc0_tex_m_routing_sel_default_f(void) +{ + return 0x0U; +} +static inline u32 gr_pri_gpc0_tpc0_tex_m_routing_sel_pipe0_f(void) +{ + return 0x1U; +} +static inline u32 gr_pri_gpc0_tpc0_tex_m_routing_sel_pipe1_f(void) +{ + return 0x2U; +} +static inline u32 gr_gpc0_tpc0_mpc_hww_esr_r(void) +{ + return 0x00504430U; +} +static inline u32 gr_gpc0_tpc0_mpc_hww_esr_reset_trigger_f(void) +{ + return 0x40000000U; +} +static inline u32 gr_gpc0_tpc0_mpc_hww_esr_info_r(void) +{ + return 0x00504434U; +} +static inline u32 gr_gpc0_tpc0_mpc_hww_esr_info_veid_v(u32 r) +{ + return (r >> 0U) & 0x3fU; +} +static inline u32 gr_pri_be0_crop_status1_r(void) +{ + return 0x00410134U; +} +static inline u32 gr_pri_bes_crop_status1_r(void) +{ + return 0x00408934U; +} +static inline u32 gr_pri_be0_zrop_status_r(void) +{ + return 0x00410048U; +} +static inline u32 gr_pri_be0_zrop_status2_r(void) 
+{ + return 0x0041004cU; +} +static inline u32 gr_pri_bes_zrop_status_r(void) +{ + return 0x00408848U; +} +static inline u32 gr_pri_bes_zrop_status2_r(void) +{ + return 0x0040884cU; +} +static inline u32 gr_pipe_bundle_address_r(void) +{ + return 0x00400200U; +} +static inline u32 gr_pipe_bundle_address_value_v(u32 r) +{ + return (r >> 0U) & 0xffffU; +} +static inline u32 gr_pipe_bundle_address_veid_f(u32 v) +{ + return (v & 0x3fU) << 20U; +} +static inline u32 gr_pipe_bundle_address_veid_w(void) +{ + return 0U; +} +static inline u32 gr_pipe_bundle_data_r(void) +{ + return 0x00400204U; +} +static inline u32 gr_pipe_bundle_config_r(void) +{ + return 0x00400208U; +} +static inline u32 gr_pipe_bundle_config_override_pipe_mode_disabled_f(void) +{ + return 0x0U; +} +static inline u32 gr_pipe_bundle_config_override_pipe_mode_enabled_f(void) +{ + return 0x80000000U; +} +static inline u32 gr_fe_hww_esr_r(void) +{ + return 0x00404000U; +} +static inline u32 gr_fe_hww_esr_reset_active_f(void) +{ + return 0x40000000U; +} +static inline u32 gr_fe_hww_esr_en_enable_f(void) +{ + return 0x80000000U; +} +static inline u32 gr_gpcs_tpcs_sms_hww_global_esr_report_mask_r(void) +{ + return 0x00419eacU; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_report_mask_r(void) +{ + return 0x0050472cU; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_report_mask_multiple_warp_errors_report_f(void) +{ + return 0x4U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_report_mask_bpt_int_report_f(void) +{ + return 0x10U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_report_mask_bpt_pause_report_f(void) +{ + return 0x20U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_report_mask_single_step_complete_report_f(void) +{ + return 0x40U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_report_mask_error_in_trap_report_f(void) +{ + return 0x100U; +} +static inline u32 gr_gpcs_tpcs_sms_hww_global_esr_r(void) +{ + return 0x00419eb4U; +} +static inline u32 
gr_gpc0_tpc0_sm0_hww_global_esr_r(void) +{ + return 0x00504734U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_bpt_int_m(void) +{ + return 0x1U << 4U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_bpt_int_pending_f(void) +{ + return 0x10U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_bpt_pause_m(void) +{ + return 0x1U << 5U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_bpt_pause_pending_f(void) +{ + return 0x20U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_single_step_complete_m(void) +{ + return 0x1U << 6U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_single_step_complete_pending_f(void) +{ + return 0x40U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_multiple_warp_errors_m(void) +{ + return 0x1U << 2U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_multiple_warp_errors_pending_f(void) +{ + return 0x4U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_error_in_trap_m(void) +{ + return 0x1U << 8U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_global_esr_error_in_trap_pending_f(void) +{ + return 0x100U; +} +static inline u32 gr_fe_go_idle_timeout_r(void) +{ + return 0x00404154U; +} +static inline u32 gr_fe_go_idle_timeout_count_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 gr_fe_go_idle_timeout_count_disabled_f(void) +{ + return 0x0U; +} +static inline u32 gr_fe_go_idle_timeout_count_prod_f(void) +{ + return 0x1800U; +} +static inline u32 gr_fe_object_table_r(u32 i) +{ + return 0x00404200U + i*4U; +} +static inline u32 gr_fe_object_table_nvclass_v(u32 r) +{ + return (r >> 0U) & 0xffffU; +} +static inline u32 gr_fe_tpc_fs_r(u32 i) +{ + return 0x0040a200U + i*4U; +} +static inline u32 gr_pri_mme_shadow_raw_index_r(void) +{ + return 0x00404488U; +} +static inline u32 gr_pri_mme_shadow_raw_index_write_trigger_f(void) +{ + return 0x80000000U; +} +static inline u32 gr_pri_mme_shadow_raw_data_r(void) +{ + return 0x0040448cU; +} +static inline u32 gr_mme_hww_esr_r(void) +{ + return 
0x00404490U; +} +static inline u32 gr_mme_hww_esr_reset_active_f(void) +{ + return 0x40000000U; +} +static inline u32 gr_mme_hww_esr_en_enable_f(void) +{ + return 0x80000000U; +} +static inline u32 gr_memfmt_hww_esr_r(void) +{ + return 0x00404600U; +} +static inline u32 gr_memfmt_hww_esr_reset_active_f(void) +{ + return 0x40000000U; +} +static inline u32 gr_memfmt_hww_esr_en_enable_f(void) +{ + return 0x80000000U; +} +static inline u32 gr_fecs_cpuctl_r(void) +{ + return 0x00409100U; +} +static inline u32 gr_fecs_cpuctl_startcpu_f(u32 v) +{ + return (v & 0x1U) << 1U; +} +static inline u32 gr_fecs_cpuctl_alias_r(void) +{ + return 0x00409130U; +} +static inline u32 gr_fecs_cpuctl_alias_startcpu_f(u32 v) +{ + return (v & 0x1U) << 1U; +} +static inline u32 gr_fecs_dmactl_r(void) +{ + return 0x0040910cU; +} +static inline u32 gr_fecs_dmactl_require_ctx_f(u32 v) +{ + return (v & 0x1U) << 0U; +} +static inline u32 gr_fecs_dmactl_dmem_scrubbing_m(void) +{ + return 0x1U << 1U; +} +static inline u32 gr_fecs_dmactl_imem_scrubbing_m(void) +{ + return 0x1U << 2U; +} +static inline u32 gr_fecs_os_r(void) +{ + return 0x00409080U; +} +static inline u32 gr_fecs_idlestate_r(void) +{ + return 0x0040904cU; +} +static inline u32 gr_fecs_mailbox0_r(void) +{ + return 0x00409040U; +} +static inline u32 gr_fecs_mailbox1_r(void) +{ + return 0x00409044U; +} +static inline u32 gr_fecs_irqstat_r(void) +{ + return 0x00409008U; +} +static inline u32 gr_fecs_irqmode_r(void) +{ + return 0x0040900cU; +} +static inline u32 gr_fecs_irqmask_r(void) +{ + return 0x00409018U; +} +static inline u32 gr_fecs_irqdest_r(void) +{ + return 0x0040901cU; +} +static inline u32 gr_fecs_curctx_r(void) +{ + return 0x00409050U; +} +static inline u32 gr_fecs_nxtctx_r(void) +{ + return 0x00409054U; +} +static inline u32 gr_fecs_engctl_r(void) +{ + return 0x004090a4U; +} +static inline u32 gr_fecs_debug1_r(void) +{ + return 0x00409090U; +} +static inline u32 gr_fecs_debuginfo_r(void) +{ + return 0x00409094U; +} +static 
inline u32 gr_fecs_icd_cmd_r(void) +{ + return 0x00409200U; +} +static inline u32 gr_fecs_icd_cmd_opc_s(void) +{ + return 4U; +} +static inline u32 gr_fecs_icd_cmd_opc_f(u32 v) +{ + return (v & 0xfU) << 0U; +} +static inline u32 gr_fecs_icd_cmd_opc_m(void) +{ + return 0xfU << 0U; +} +static inline u32 gr_fecs_icd_cmd_opc_v(u32 r) +{ + return (r >> 0U) & 0xfU; +} +static inline u32 gr_fecs_icd_cmd_opc_rreg_f(void) +{ + return 0x8U; +} +static inline u32 gr_fecs_icd_cmd_opc_rstat_f(void) +{ + return 0xeU; +} +static inline u32 gr_fecs_icd_cmd_idx_f(u32 v) +{ + return (v & 0x1fU) << 8U; +} +static inline u32 gr_fecs_icd_rdata_r(void) +{ + return 0x0040920cU; +} +static inline u32 gr_fecs_imemc_r(u32 i) +{ + return 0x00409180U + i*16U; +} +static inline u32 gr_fecs_imemc_offs_f(u32 v) +{ + return (v & 0x3fU) << 2U; +} +static inline u32 gr_fecs_imemc_blk_f(u32 v) +{ + return (v & 0xffU) << 8U; +} +static inline u32 gr_fecs_imemc_aincw_f(u32 v) +{ + return (v & 0x1U) << 24U; +} +static inline u32 gr_fecs_imemd_r(u32 i) +{ + return 0x00409184U + i*16U; +} +static inline u32 gr_fecs_imemt_r(u32 i) +{ + return 0x00409188U + i*16U; +} +static inline u32 gr_fecs_imemt_tag_f(u32 v) +{ + return (v & 0xffffU) << 0U; +} +static inline u32 gr_fecs_dmemc_r(u32 i) +{ + return 0x004091c0U + i*8U; +} +static inline u32 gr_fecs_dmemc_offs_s(void) +{ + return 6U; +} +static inline u32 gr_fecs_dmemc_offs_f(u32 v) +{ + return (v & 0x3fU) << 2U; +} +static inline u32 gr_fecs_dmemc_offs_m(void) +{ + return 0x3fU << 2U; +} +static inline u32 gr_fecs_dmemc_offs_v(u32 r) +{ + return (r >> 2U) & 0x3fU; +} +static inline u32 gr_fecs_dmemc_blk_f(u32 v) +{ + return (v & 0xffU) << 8U; +} +static inline u32 gr_fecs_dmemc_aincw_f(u32 v) +{ + return (v & 0x1U) << 24U; +} +static inline u32 gr_fecs_dmemd_r(u32 i) +{ + return 0x004091c4U + i*8U; +} +static inline u32 gr_fecs_dmatrfbase_r(void) +{ + return 0x00409110U; +} +static inline u32 gr_fecs_dmatrfmoffs_r(void) +{ + return 0x00409114U; +} +static 
inline u32 gr_fecs_dmatrffboffs_r(void) +{ + return 0x0040911cU; +} +static inline u32 gr_fecs_dmatrfcmd_r(void) +{ + return 0x00409118U; +} +static inline u32 gr_fecs_dmatrfcmd_imem_f(u32 v) +{ + return (v & 0x1U) << 4U; +} +static inline u32 gr_fecs_dmatrfcmd_write_f(u32 v) +{ + return (v & 0x1U) << 5U; +} +static inline u32 gr_fecs_dmatrfcmd_size_f(u32 v) +{ + return (v & 0x7U) << 8U; +} +static inline u32 gr_fecs_dmatrfcmd_ctxdma_f(u32 v) +{ + return (v & 0x7U) << 12U; +} +static inline u32 gr_fecs_bootvec_r(void) +{ + return 0x00409104U; +} +static inline u32 gr_fecs_bootvec_vec_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 gr_fecs_falcon_hwcfg_r(void) +{ + return 0x00409108U; +} +static inline u32 gr_gpcs_gpccs_falcon_hwcfg_r(void) +{ + return 0x0041a108U; +} +static inline u32 gr_fecs_falcon_rm_r(void) +{ + return 0x00409084U; +} +static inline u32 gr_fecs_current_ctx_r(void) +{ + return 0x00409b00U; +} +static inline u32 gr_fecs_current_ctx_ptr_f(u32 v) +{ + return (v & 0xfffffffU) << 0U; +} +static inline u32 gr_fecs_current_ctx_ptr_v(u32 r) +{ + return (r >> 0U) & 0xfffffffU; +} +static inline u32 gr_fecs_current_ctx_target_s(void) +{ + return 2U; +} +static inline u32 gr_fecs_current_ctx_target_f(u32 v) +{ + return (v & 0x3U) << 28U; +} +static inline u32 gr_fecs_current_ctx_target_m(void) +{ + return 0x3U << 28U; +} +static inline u32 gr_fecs_current_ctx_target_v(u32 r) +{ + return (r >> 28U) & 0x3U; +} +static inline u32 gr_fecs_current_ctx_target_vid_mem_f(void) +{ + return 0x0U; +} +static inline u32 gr_fecs_current_ctx_target_sys_mem_coh_f(void) +{ + return 0x20000000U; +} +static inline u32 gr_fecs_current_ctx_target_sys_mem_ncoh_f(void) +{ + return 0x30000000U; +} +static inline u32 gr_fecs_current_ctx_valid_s(void) +{ + return 1U; +} +static inline u32 gr_fecs_current_ctx_valid_f(u32 v) +{ + return (v & 0x1U) << 31U; +} +static inline u32 gr_fecs_current_ctx_valid_m(void) +{ + return 0x1U << 31U; +} +static inline u32 
gr_fecs_current_ctx_valid_v(u32 r) +{ + return (r >> 31U) & 0x1U; +} +static inline u32 gr_fecs_current_ctx_valid_false_f(void) +{ + return 0x0U; +} +static inline u32 gr_fecs_method_data_r(void) +{ + return 0x00409500U; +} +static inline u32 gr_fecs_method_push_r(void) +{ + return 0x00409504U; +} +static inline u32 gr_fecs_method_push_adr_f(u32 v) +{ + return (v & 0xfffU) << 0U; +} +static inline u32 gr_fecs_method_push_adr_bind_pointer_v(void) +{ + return 0x00000003U; +} +static inline u32 gr_fecs_method_push_adr_bind_pointer_f(void) +{ + return 0x3U; +} +static inline u32 gr_fecs_method_push_adr_discover_image_size_v(void) +{ + return 0x00000010U; +} +static inline u32 gr_fecs_method_push_adr_wfi_golden_save_v(void) +{ + return 0x00000009U; +} +static inline u32 gr_fecs_method_push_adr_restore_golden_v(void) +{ + return 0x00000015U; +} +static inline u32 gr_fecs_method_push_adr_discover_zcull_image_size_v(void) +{ + return 0x00000016U; +} +static inline u32 gr_fecs_method_push_adr_discover_pm_image_size_v(void) +{ + return 0x00000025U; +} +static inline u32 gr_fecs_method_push_adr_discover_reglist_image_size_v(void) +{ + return 0x00000030U; +} +static inline u32 gr_fecs_method_push_adr_set_reglist_bind_instance_v(void) +{ + return 0x00000031U; +} +static inline u32 gr_fecs_method_push_adr_set_reglist_virtual_address_v(void) +{ + return 0x00000032U; +} +static inline u32 gr_fecs_method_push_adr_stop_ctxsw_v(void) +{ + return 0x00000038U; +} +static inline u32 gr_fecs_method_push_adr_start_ctxsw_v(void) +{ + return 0x00000039U; +} +static inline u32 gr_fecs_method_push_adr_set_watchdog_timeout_f(void) +{ + return 0x21U; +} +static inline u32 gr_fecs_method_push_adr_discover_preemption_image_size_v(void) +{ + return 0x0000001aU; +} +static inline u32 gr_fecs_method_push_adr_halt_pipeline_v(void) +{ + return 0x00000004U; +} +static inline u32 gr_fecs_method_push_adr_configure_interrupt_completion_option_v(void) +{ + return 0x0000003aU; +} +static inline u32 
gr_fecs_host_int_status_r(void) +{ + return 0x00409c18U; +} +static inline u32 gr_fecs_host_int_status_fault_during_ctxsw_f(u32 v) +{ + return (v & 0x1U) << 16U; +} +static inline u32 gr_fecs_host_int_status_umimp_firmware_method_f(u32 v) +{ + return (v & 0x1U) << 17U; +} +static inline u32 gr_fecs_host_int_status_umimp_illegal_method_f(u32 v) +{ + return (v & 0x1U) << 18U; +} +static inline u32 gr_fecs_host_int_status_ctxsw_intr_f(u32 v) +{ + return (v & 0xffffU) << 0U; +} +static inline u32 gr_fecs_host_int_status_ecc_corrected_f(u32 v) +{ + return (v & 0x1U) << 21U; +} +static inline u32 gr_fecs_host_int_status_ecc_corrected_m(void) +{ + return 0x1U << 21U; +} +static inline u32 gr_fecs_host_int_status_ecc_uncorrected_f(u32 v) +{ + return (v & 0x1U) << 22U; +} +static inline u32 gr_fecs_host_int_status_ecc_uncorrected_m(void) +{ + return 0x1U << 22U; +} +static inline u32 gr_fecs_host_int_clear_r(void) +{ + return 0x00409c20U; +} +static inline u32 gr_fecs_host_int_clear_ctxsw_intr1_f(u32 v) +{ + return (v & 0x1U) << 1U; +} +static inline u32 gr_fecs_host_int_clear_ctxsw_intr1_clear_f(void) +{ + return 0x2U; +} +static inline u32 gr_fecs_host_int_enable_r(void) +{ + return 0x00409c24U; +} +static inline u32 gr_fecs_host_int_enable_ctxsw_intr1_enable_f(void) +{ + return 0x2U; +} +static inline u32 gr_fecs_host_int_enable_fault_during_ctxsw_enable_f(void) +{ + return 0x10000U; +} +static inline u32 gr_fecs_host_int_enable_umimp_firmware_method_enable_f(void) +{ + return 0x20000U; +} +static inline u32 gr_fecs_host_int_enable_umimp_illegal_method_enable_f(void) +{ + return 0x40000U; +} +static inline u32 gr_fecs_host_int_enable_watchdog_enable_f(void) +{ + return 0x80000U; +} +static inline u32 gr_fecs_ctxsw_reset_ctl_r(void) +{ + return 0x00409614U; +} +static inline u32 gr_fecs_ctxsw_reset_ctl_sys_halt_disabled_f(void) +{ + return 0x0U; +} +static inline u32 gr_fecs_ctxsw_reset_ctl_gpc_halt_disabled_f(void) +{ + return 0x0U; +} +static inline u32 
gr_fecs_ctxsw_reset_ctl_be_halt_disabled_f(void) +{ + return 0x0U; +} +static inline u32 gr_fecs_ctxsw_reset_ctl_sys_engine_reset_disabled_f(void) +{ + return 0x10U; +} +static inline u32 gr_fecs_ctxsw_reset_ctl_gpc_engine_reset_disabled_f(void) +{ + return 0x20U; +} +static inline u32 gr_fecs_ctxsw_reset_ctl_be_engine_reset_disabled_f(void) +{ + return 0x40U; +} +static inline u32 gr_fecs_ctxsw_reset_ctl_sys_context_reset_enabled_f(void) +{ + return 0x0U; +} +static inline u32 gr_fecs_ctxsw_reset_ctl_sys_context_reset_disabled_f(void) +{ + return 0x100U; +} +static inline u32 gr_fecs_ctxsw_reset_ctl_gpc_context_reset_enabled_f(void) +{ + return 0x0U; +} +static inline u32 gr_fecs_ctxsw_reset_ctl_gpc_context_reset_disabled_f(void) +{ + return 0x200U; +} +static inline u32 gr_fecs_ctxsw_reset_ctl_be_context_reset_s(void) +{ + return 1U; +} +static inline u32 gr_fecs_ctxsw_reset_ctl_be_context_reset_f(u32 v) +{ + return (v & 0x1U) << 10U; +} +static inline u32 gr_fecs_ctxsw_reset_ctl_be_context_reset_m(void) +{ + return 0x1U << 10U; +} +static inline u32 gr_fecs_ctxsw_reset_ctl_be_context_reset_v(u32 r) +{ + return (r >> 10U) & 0x1U; +} +static inline u32 gr_fecs_ctxsw_reset_ctl_be_context_reset_enabled_f(void) +{ + return 0x0U; +} +static inline u32 gr_fecs_ctxsw_reset_ctl_be_context_reset_disabled_f(void) +{ + return 0x400U; +} +static inline u32 gr_fecs_ctx_state_store_major_rev_id_r(void) +{ + return 0x0040960cU; +} +static inline u32 gr_fecs_ctxsw_mailbox_r(u32 i) +{ + return 0x00409800U + i*4U; +} +static inline u32 gr_fecs_ctxsw_mailbox__size_1_v(void) +{ + return 0x00000010U; +} +static inline u32 gr_fecs_ctxsw_mailbox_value_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 gr_fecs_ctxsw_mailbox_value_pass_v(void) +{ + return 0x00000001U; +} +static inline u32 gr_fecs_ctxsw_mailbox_value_fail_v(void) +{ + return 0x00000002U; +} +static inline u32 gr_fecs_ctxsw_mailbox_set_r(u32 i) +{ + return 0x004098c0U + i*4U; +} +static inline u32 
gr_fecs_ctxsw_mailbox_set_value_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 gr_fecs_ctxsw_mailbox_clear_r(u32 i) +{ + return 0x00409840U + i*4U; +} +static inline u32 gr_fecs_ctxsw_mailbox_clear_value_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 gr_fecs_fs_r(void) +{ + return 0x00409604U; +} +static inline u32 gr_fecs_fs_num_available_gpcs_s(void) +{ + return 5U; +} +static inline u32 gr_fecs_fs_num_available_gpcs_f(u32 v) +{ + return (v & 0x1fU) << 0U; +} +static inline u32 gr_fecs_fs_num_available_gpcs_m(void) +{ + return 0x1fU << 0U; +} +static inline u32 gr_fecs_fs_num_available_gpcs_v(u32 r) +{ + return (r >> 0U) & 0x1fU; +} +static inline u32 gr_fecs_fs_num_available_fbps_s(void) +{ + return 5U; +} +static inline u32 gr_fecs_fs_num_available_fbps_f(u32 v) +{ + return (v & 0x1fU) << 16U; +} +static inline u32 gr_fecs_fs_num_available_fbps_m(void) +{ + return 0x1fU << 16U; +} +static inline u32 gr_fecs_fs_num_available_fbps_v(u32 r) +{ + return (r >> 16U) & 0x1fU; +} +static inline u32 gr_fecs_cfg_r(void) +{ + return 0x00409620U; +} +static inline u32 gr_fecs_cfg_imem_sz_v(u32 r) +{ + return (r >> 0U) & 0xffU; +} +static inline u32 gr_fecs_rc_lanes_r(void) +{ + return 0x00409880U; +} +static inline u32 gr_fecs_rc_lanes_num_chains_s(void) +{ + return 6U; +} +static inline u32 gr_fecs_rc_lanes_num_chains_f(u32 v) +{ + return (v & 0x3fU) << 0U; +} +static inline u32 gr_fecs_rc_lanes_num_chains_m(void) +{ + return 0x3fU << 0U; +} +static inline u32 gr_fecs_rc_lanes_num_chains_v(u32 r) +{ + return (r >> 0U) & 0x3fU; +} +static inline u32 gr_fecs_ctxsw_status_1_r(void) +{ + return 0x00409400U; +} +static inline u32 gr_fecs_ctxsw_status_1_arb_busy_s(void) +{ + return 1U; +} +static inline u32 gr_fecs_ctxsw_status_1_arb_busy_f(u32 v) +{ + return (v & 0x1U) << 12U; +} +static inline u32 gr_fecs_ctxsw_status_1_arb_busy_m(void) +{ + return 0x1U << 12U; +} +static inline u32 gr_fecs_ctxsw_status_1_arb_busy_v(u32 r) +{ + return 
(r >> 12U) & 0x1U; +} +static inline u32 gr_fecs_arb_ctx_adr_r(void) +{ + return 0x00409a24U; +} +static inline u32 gr_fecs_new_ctx_r(void) +{ + return 0x00409b04U; +} +static inline u32 gr_fecs_new_ctx_ptr_s(void) +{ + return 28U; +} +static inline u32 gr_fecs_new_ctx_ptr_f(u32 v) +{ + return (v & 0xfffffffU) << 0U; +} +static inline u32 gr_fecs_new_ctx_ptr_m(void) +{ + return 0xfffffffU << 0U; +} +static inline u32 gr_fecs_new_ctx_ptr_v(u32 r) +{ + return (r >> 0U) & 0xfffffffU; +} +static inline u32 gr_fecs_new_ctx_target_s(void) +{ + return 2U; +} +static inline u32 gr_fecs_new_ctx_target_f(u32 v) +{ + return (v & 0x3U) << 28U; +} +static inline u32 gr_fecs_new_ctx_target_m(void) +{ + return 0x3U << 28U; +} +static inline u32 gr_fecs_new_ctx_target_v(u32 r) +{ + return (r >> 28U) & 0x3U; +} +static inline u32 gr_fecs_new_ctx_valid_s(void) +{ + return 1U; +} +static inline u32 gr_fecs_new_ctx_valid_f(u32 v) +{ + return (v & 0x1U) << 31U; +} +static inline u32 gr_fecs_new_ctx_valid_m(void) +{ + return 0x1U << 31U; +} +static inline u32 gr_fecs_new_ctx_valid_v(u32 r) +{ + return (r >> 31U) & 0x1U; +} +static inline u32 gr_fecs_arb_ctx_ptr_r(void) +{ + return 0x00409a0cU; +} +static inline u32 gr_fecs_arb_ctx_ptr_ptr_s(void) +{ + return 28U; +} +static inline u32 gr_fecs_arb_ctx_ptr_ptr_f(u32 v) +{ + return (v & 0xfffffffU) << 0U; +} +static inline u32 gr_fecs_arb_ctx_ptr_ptr_m(void) +{ + return 0xfffffffU << 0U; +} +static inline u32 gr_fecs_arb_ctx_ptr_ptr_v(u32 r) +{ + return (r >> 0U) & 0xfffffffU; +} +static inline u32 gr_fecs_arb_ctx_ptr_target_s(void) +{ + return 2U; +} +static inline u32 gr_fecs_arb_ctx_ptr_target_f(u32 v) +{ + return (v & 0x3U) << 28U; +} +static inline u32 gr_fecs_arb_ctx_ptr_target_m(void) +{ + return 0x3U << 28U; +} +static inline u32 gr_fecs_arb_ctx_ptr_target_v(u32 r) +{ + return (r >> 28U) & 0x3U; +} +static inline u32 gr_fecs_arb_ctx_cmd_r(void) +{ + return 0x00409a10U; +} +static inline u32 gr_fecs_arb_ctx_cmd_cmd_s(void) +{ + 
return 5U; +} +static inline u32 gr_fecs_arb_ctx_cmd_cmd_f(u32 v) +{ + return (v & 0x1fU) << 0U; +} +static inline u32 gr_fecs_arb_ctx_cmd_cmd_m(void) +{ + return 0x1fU << 0U; +} +static inline u32 gr_fecs_arb_ctx_cmd_cmd_v(u32 r) +{ + return (r >> 0U) & 0x1fU; +} +static inline u32 gr_fecs_ctxsw_status_fe_0_r(void) +{ + return 0x00409c00U; +} +static inline u32 gr_gpc0_gpccs_ctxsw_status_gpc_0_r(void) +{ + return 0x00502c04U; +} +static inline u32 gr_gpc0_gpccs_ctxsw_status_1_r(void) +{ + return 0x00502400U; +} +static inline u32 gr_fecs_ctxsw_idlestate_r(void) +{ + return 0x00409420U; +} +static inline u32 gr_fecs_feature_override_ecc_r(void) +{ + return 0x00409658U; +} +static inline u32 gr_fecs_feature_override_ecc_sm_lrf_override_v(u32 r) +{ + return (r >> 3U) & 0x1U; +} +static inline u32 gr_fecs_feature_override_ecc_ltc_override_v(u32 r) +{ + return (r >> 15U) & 0x1U; +} +static inline u32 gr_fecs_feature_override_ecc_sm_lrf_v(u32 r) +{ + return (r >> 0U) & 0x1U; +} +static inline u32 gr_fecs_feature_override_ecc_ltc_v(u32 r) +{ + return (r >> 12U) & 0x1U; +} +static inline u32 gr_gpc0_gpccs_ctxsw_idlestate_r(void) +{ + return 0x00502420U; +} +static inline u32 gr_rstr2d_gpc_map_r(u32 i) +{ + return 0x0040780cU + i*4U; +} +static inline u32 gr_rstr2d_map_table_cfg_r(void) +{ + return 0x004078bcU; +} +static inline u32 gr_rstr2d_map_table_cfg_row_offset_f(u32 v) +{ + return (v & 0xffU) << 0U; +} +static inline u32 gr_rstr2d_map_table_cfg_num_entries_f(u32 v) +{ + return (v & 0xffU) << 8U; +} +static inline u32 gr_pd_hww_esr_r(void) +{ + return 0x00406018U; +} +static inline u32 gr_pd_hww_esr_reset_active_f(void) +{ + return 0x40000000U; +} +static inline u32 gr_pd_hww_esr_en_enable_f(void) +{ + return 0x80000000U; +} +static inline u32 gr_pd_num_tpc_per_gpc_r(u32 i) +{ + return 0x00406028U + i*4U; +} +static inline u32 gr_pd_num_tpc_per_gpc__size_1_v(void) +{ + return 0x00000004U; +} +static inline u32 gr_pd_num_tpc_per_gpc_count0_f(u32 v) +{ + return (v & 
0xfU) << 0U; +} +static inline u32 gr_pd_num_tpc_per_gpc_count1_f(u32 v) +{ + return (v & 0xfU) << 4U; +} +static inline u32 gr_pd_num_tpc_per_gpc_count2_f(u32 v) +{ + return (v & 0xfU) << 8U; +} +static inline u32 gr_pd_num_tpc_per_gpc_count3_f(u32 v) +{ + return (v & 0xfU) << 12U; +} +static inline u32 gr_pd_num_tpc_per_gpc_count4_f(u32 v) +{ + return (v & 0xfU) << 16U; +} +static inline u32 gr_pd_num_tpc_per_gpc_count5_f(u32 v) +{ + return (v & 0xfU) << 20U; +} +static inline u32 gr_pd_num_tpc_per_gpc_count6_f(u32 v) +{ + return (v & 0xfU) << 24U; +} +static inline u32 gr_pd_num_tpc_per_gpc_count7_f(u32 v) +{ + return (v & 0xfU) << 28U; +} +static inline u32 gr_pd_ab_dist_cfg0_r(void) +{ + return 0x004064c0U; +} +static inline u32 gr_pd_ab_dist_cfg0_timeslice_enable_en_f(void) +{ + return 0x80000000U; +} +static inline u32 gr_pd_ab_dist_cfg0_timeslice_enable_dis_f(void) +{ + return 0x0U; +} +static inline u32 gr_pd_ab_dist_cfg1_r(void) +{ + return 0x004064c4U; +} +static inline u32 gr_pd_ab_dist_cfg1_max_batches_init_f(void) +{ + return 0xffffU; +} +static inline u32 gr_pd_ab_dist_cfg1_max_output_f(u32 v) +{ + return (v & 0xffffU) << 16U; +} +static inline u32 gr_pd_ab_dist_cfg1_max_output_granularity_v(void) +{ + return 0x00000080U; +} +static inline u32 gr_pd_ab_dist_cfg2_r(void) +{ + return 0x004064c8U; +} +static inline u32 gr_pd_ab_dist_cfg2_token_limit_f(u32 v) +{ + return (v & 0x1fffU) << 0U; +} +static inline u32 gr_pd_ab_dist_cfg2_token_limit_init_v(void) +{ + return 0x00000380U; +} +static inline u32 gr_pd_ab_dist_cfg2_state_limit_f(u32 v) +{ + return (v & 0x1fffU) << 16U; +} +static inline u32 gr_pd_ab_dist_cfg2_state_limit_scc_bundle_granularity_v(void) +{ + return 0x00000020U; +} +static inline u32 gr_pd_ab_dist_cfg2_state_limit_min_gpm_fifo_depths_v(void) +{ + return 0x00000302U; +} +static inline u32 gr_pd_dist_skip_table_r(u32 i) +{ + return 0x004064d0U + i*4U; +} +static inline u32 gr_pd_dist_skip_table__size_1_v(void) +{ + return 0x00000008U; 
+} +static inline u32 gr_pd_dist_skip_table_gpc_4n0_mask_f(u32 v) +{ + return (v & 0xffU) << 0U; +} +static inline u32 gr_pd_dist_skip_table_gpc_4n1_mask_f(u32 v) +{ + return (v & 0xffU) << 8U; +} +static inline u32 gr_pd_dist_skip_table_gpc_4n2_mask_f(u32 v) +{ + return (v & 0xffU) << 16U; +} +static inline u32 gr_pd_dist_skip_table_gpc_4n3_mask_f(u32 v) +{ + return (v & 0xffU) << 24U; +} +static inline u32 gr_ds_debug_r(void) +{ + return 0x00405800U; +} +static inline u32 gr_ds_debug_timeslice_mode_disable_f(void) +{ + return 0x0U; +} +static inline u32 gr_ds_debug_timeslice_mode_enable_f(void) +{ + return 0x8000000U; +} +static inline u32 gr_ds_zbc_color_r_r(void) +{ + return 0x00405804U; +} +static inline u32 gr_ds_zbc_color_r_val_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 gr_ds_zbc_color_g_r(void) +{ + return 0x00405808U; +} +static inline u32 gr_ds_zbc_color_g_val_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 gr_ds_zbc_color_b_r(void) +{ + return 0x0040580cU; +} +static inline u32 gr_ds_zbc_color_b_val_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 gr_ds_zbc_color_a_r(void) +{ + return 0x00405810U; +} +static inline u32 gr_ds_zbc_color_a_val_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 gr_ds_zbc_color_fmt_r(void) +{ + return 0x00405814U; +} +static inline u32 gr_ds_zbc_color_fmt_val_f(u32 v) +{ + return (v & 0x7fU) << 0U; +} +static inline u32 gr_ds_zbc_color_fmt_val_invalid_f(void) +{ + return 0x0U; +} +static inline u32 gr_ds_zbc_color_fmt_val_zero_v(void) +{ + return 0x00000001U; +} +static inline u32 gr_ds_zbc_color_fmt_val_unorm_one_v(void) +{ + return 0x00000002U; +} +static inline u32 gr_ds_zbc_color_fmt_val_rf32_gf32_bf32_af32_v(void) +{ + return 0x00000004U; +} +static inline u32 gr_ds_zbc_color_fmt_val_a8_b8_g8_r8_v(void) +{ + return 0x00000028U; +} +static inline u32 gr_ds_zbc_z_r(void) +{ + return 0x00405818U; +} +static inline u32 gr_ds_zbc_z_val_s(void) +{ 
+ return 32U; +} +static inline u32 gr_ds_zbc_z_val_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 gr_ds_zbc_z_val_m(void) +{ + return 0xffffffffU << 0U; +} +static inline u32 gr_ds_zbc_z_val_v(u32 r) +{ + return (r >> 0U) & 0xffffffffU; +} +static inline u32 gr_ds_zbc_z_val__init_v(void) +{ + return 0x00000000U; +} +static inline u32 gr_ds_zbc_z_val__init_f(void) +{ + return 0x0U; +} +static inline u32 gr_ds_zbc_z_fmt_r(void) +{ + return 0x0040581cU; +} +static inline u32 gr_ds_zbc_z_fmt_val_f(u32 v) +{ + return (v & 0x1U) << 0U; +} +static inline u32 gr_ds_zbc_z_fmt_val_invalid_f(void) +{ + return 0x0U; +} +static inline u32 gr_ds_zbc_z_fmt_val_fp32_v(void) +{ + return 0x00000001U; +} +static inline u32 gr_ds_zbc_tbl_index_r(void) +{ + return 0x00405820U; +} +static inline u32 gr_ds_zbc_tbl_index_val_f(u32 v) +{ + return (v & 0xfU) << 0U; +} +static inline u32 gr_ds_zbc_tbl_ld_r(void) +{ + return 0x00405824U; +} +static inline u32 gr_ds_zbc_tbl_ld_select_c_f(void) +{ + return 0x0U; +} +static inline u32 gr_ds_zbc_tbl_ld_select_z_f(void) +{ + return 0x1U; +} +static inline u32 gr_ds_zbc_tbl_ld_action_write_f(void) +{ + return 0x0U; +} +static inline u32 gr_ds_zbc_tbl_ld_trigger_active_f(void) +{ + return 0x4U; +} +static inline u32 gr_ds_tga_constraintlogic_beta_r(void) +{ + return 0x00405830U; +} +static inline u32 gr_ds_tga_constraintlogic_beta_cbsize_f(u32 v) +{ + return (v & 0x3fffffU) << 0U; +} +static inline u32 gr_ds_tga_constraintlogic_alpha_r(void) +{ + return 0x0040585cU; +} +static inline u32 gr_ds_tga_constraintlogic_alpha_cbsize_f(u32 v) +{ + return (v & 0xffffU) << 0U; +} +static inline u32 gr_ds_hww_esr_r(void) +{ + return 0x00405840U; +} +static inline u32 gr_ds_hww_esr_reset_s(void) +{ + return 1U; +} +static inline u32 gr_ds_hww_esr_reset_f(u32 v) +{ + return (v & 0x1U) << 30U; +} +static inline u32 gr_ds_hww_esr_reset_m(void) +{ + return 0x1U << 30U; +} +static inline u32 gr_ds_hww_esr_reset_v(u32 r) +{ + return (r >> 30U) & 
0x1U; +} +static inline u32 gr_ds_hww_esr_reset_task_v(void) +{ + return 0x00000001U; +} +static inline u32 gr_ds_hww_esr_reset_task_f(void) +{ + return 0x40000000U; +} +static inline u32 gr_ds_hww_esr_en_enabled_f(void) +{ + return 0x80000000U; +} +static inline u32 gr_ds_hww_esr_2_r(void) +{ + return 0x00405848U; +} +static inline u32 gr_ds_hww_esr_2_reset_s(void) +{ + return 1U; +} +static inline u32 gr_ds_hww_esr_2_reset_f(u32 v) +{ + return (v & 0x1U) << 30U; +} +static inline u32 gr_ds_hww_esr_2_reset_m(void) +{ + return 0x1U << 30U; +} +static inline u32 gr_ds_hww_esr_2_reset_v(u32 r) +{ + return (r >> 30U) & 0x1U; +} +static inline u32 gr_ds_hww_esr_2_reset_task_v(void) +{ + return 0x00000001U; +} +static inline u32 gr_ds_hww_esr_2_reset_task_f(void) +{ + return 0x40000000U; +} +static inline u32 gr_ds_hww_esr_2_en_enabled_f(void) +{ + return 0x80000000U; +} +static inline u32 gr_ds_hww_report_mask_r(void) +{ + return 0x00405844U; +} +static inline u32 gr_ds_hww_report_mask_sph0_err_report_f(void) +{ + return 0x1U; +} +static inline u32 gr_ds_hww_report_mask_sph1_err_report_f(void) +{ + return 0x2U; +} +static inline u32 gr_ds_hww_report_mask_sph2_err_report_f(void) +{ + return 0x4U; +} +static inline u32 gr_ds_hww_report_mask_sph3_err_report_f(void) +{ + return 0x8U; +} +static inline u32 gr_ds_hww_report_mask_sph4_err_report_f(void) +{ + return 0x10U; +} +static inline u32 gr_ds_hww_report_mask_sph5_err_report_f(void) +{ + return 0x20U; +} +static inline u32 gr_ds_hww_report_mask_sph6_err_report_f(void) +{ + return 0x40U; +} +static inline u32 gr_ds_hww_report_mask_sph7_err_report_f(void) +{ + return 0x80U; +} +static inline u32 gr_ds_hww_report_mask_sph8_err_report_f(void) +{ + return 0x100U; +} +static inline u32 gr_ds_hww_report_mask_sph9_err_report_f(void) +{ + return 0x200U; +} +static inline u32 gr_ds_hww_report_mask_sph10_err_report_f(void) +{ + return 0x400U; +} +static inline u32 gr_ds_hww_report_mask_sph11_err_report_f(void) +{ + return 0x800U; 
+} +static inline u32 gr_ds_hww_report_mask_sph12_err_report_f(void) +{ + return 0x1000U; +} +static inline u32 gr_ds_hww_report_mask_sph13_err_report_f(void) +{ + return 0x2000U; +} +static inline u32 gr_ds_hww_report_mask_sph14_err_report_f(void) +{ + return 0x4000U; +} +static inline u32 gr_ds_hww_report_mask_sph15_err_report_f(void) +{ + return 0x8000U; +} +static inline u32 gr_ds_hww_report_mask_sph16_err_report_f(void) +{ + return 0x10000U; +} +static inline u32 gr_ds_hww_report_mask_sph17_err_report_f(void) +{ + return 0x20000U; +} +static inline u32 gr_ds_hww_report_mask_sph18_err_report_f(void) +{ + return 0x40000U; +} +static inline u32 gr_ds_hww_report_mask_sph19_err_report_f(void) +{ + return 0x80000U; +} +static inline u32 gr_ds_hww_report_mask_sph20_err_report_f(void) +{ + return 0x100000U; +} +static inline u32 gr_ds_hww_report_mask_sph21_err_report_f(void) +{ + return 0x200000U; +} +static inline u32 gr_ds_hww_report_mask_sph22_err_report_f(void) +{ + return 0x400000U; +} +static inline u32 gr_ds_hww_report_mask_sph23_err_report_f(void) +{ + return 0x800000U; +} +static inline u32 gr_ds_hww_report_mask_2_r(void) +{ + return 0x0040584cU; +} +static inline u32 gr_ds_hww_report_mask_2_sph24_err_report_f(void) +{ + return 0x1U; +} +static inline u32 gr_ds_num_tpc_per_gpc_r(u32 i) +{ + return 0x00405870U + i*4U; +} +static inline u32 gr_scc_bundle_cb_base_r(void) +{ + return 0x00408004U; +} +static inline u32 gr_scc_bundle_cb_base_addr_39_8_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 gr_scc_bundle_cb_base_addr_39_8_align_bits_v(void) +{ + return 0x00000008U; +} +static inline u32 gr_scc_bundle_cb_size_r(void) +{ + return 0x00408008U; +} +static inline u32 gr_scc_bundle_cb_size_div_256b_f(u32 v) +{ + return (v & 0x7ffU) << 0U; +} +static inline u32 gr_scc_bundle_cb_size_div_256b__prod_v(void) +{ + return 0x00000030U; +} +static inline u32 gr_scc_bundle_cb_size_div_256b_byte_granularity_v(void) +{ + return 0x00000100U; +} +static 
inline u32 gr_scc_bundle_cb_size_valid_false_v(void) +{ + return 0x00000000U; +} +static inline u32 gr_scc_bundle_cb_size_valid_false_f(void) +{ + return 0x0U; +} +static inline u32 gr_scc_bundle_cb_size_valid_true_f(void) +{ + return 0x80000000U; +} +static inline u32 gr_scc_pagepool_base_r(void) +{ + return 0x0040800cU; +} +static inline u32 gr_scc_pagepool_base_addr_39_8_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 gr_scc_pagepool_base_addr_39_8_align_bits_v(void) +{ + return 0x00000008U; +} +static inline u32 gr_scc_pagepool_r(void) +{ + return 0x00408010U; +} +static inline u32 gr_scc_pagepool_total_pages_f(u32 v) +{ + return (v & 0x3ffU) << 0U; +} +static inline u32 gr_scc_pagepool_total_pages_hwmax_v(void) +{ + return 0x00000000U; +} +static inline u32 gr_scc_pagepool_total_pages_hwmax_value_v(void) +{ + return 0x00000200U; +} +static inline u32 gr_scc_pagepool_total_pages_byte_granularity_v(void) +{ + return 0x00000100U; +} +static inline u32 gr_scc_pagepool_max_valid_pages_s(void) +{ + return 10U; +} +static inline u32 gr_scc_pagepool_max_valid_pages_f(u32 v) +{ + return (v & 0x3ffU) << 10U; +} +static inline u32 gr_scc_pagepool_max_valid_pages_m(void) +{ + return 0x3ffU << 10U; +} +static inline u32 gr_scc_pagepool_max_valid_pages_v(u32 r) +{ + return (r >> 10U) & 0x3ffU; +} +static inline u32 gr_scc_pagepool_valid_true_f(void) +{ + return 0x80000000U; +} +static inline u32 gr_scc_init_r(void) +{ + return 0x0040802cU; +} +static inline u32 gr_scc_init_ram_trigger_f(void) +{ + return 0x1U; +} +static inline u32 gr_scc_hww_esr_r(void) +{ + return 0x00408030U; +} +static inline u32 gr_scc_hww_esr_reset_active_f(void) +{ + return 0x40000000U; +} +static inline u32 gr_scc_hww_esr_en_enable_f(void) +{ + return 0x80000000U; +} +static inline u32 gr_sked_hww_esr_r(void) +{ + return 0x00407020U; +} +static inline u32 gr_sked_hww_esr_reset_active_f(void) +{ + return 0x40000000U; +} +static inline u32 gr_sked_hww_esr_en_r(void) +{ + return 
0x00407024U; +} +static inline u32 gr_sked_hww_esr_en_skedcheck18_l1_config_too_small_m(void) +{ + return 0x1U << 25U; +} +static inline u32 gr_sked_hww_esr_en_skedcheck18_l1_config_too_small_disabled_f(void) +{ + return 0x0U; +} +static inline u32 gr_sked_hww_esr_en_skedcheck18_l1_config_too_small_enabled_f(void) +{ + return 0x2000000U; +} +static inline u32 gr_cwd_fs_r(void) +{ + return 0x00405b00U; +} +static inline u32 gr_cwd_fs_num_gpcs_f(u32 v) +{ + return (v & 0xffU) << 0U; +} +static inline u32 gr_cwd_fs_num_tpcs_f(u32 v) +{ + return (v & 0xffU) << 8U; +} +static inline u32 gr_cwd_gpc_tpc_id_r(u32 i) +{ + return 0x00405b60U + i*4U; +} +static inline u32 gr_cwd_gpc_tpc_id_tpc0_s(void) +{ + return 4U; +} +static inline u32 gr_cwd_gpc_tpc_id_tpc0_f(u32 v) +{ + return (v & 0xfU) << 0U; +} +static inline u32 gr_cwd_gpc_tpc_id_gpc0_s(void) +{ + return 4U; +} +static inline u32 gr_cwd_gpc_tpc_id_gpc0_f(u32 v) +{ + return (v & 0xfU) << 4U; +} +static inline u32 gr_cwd_gpc_tpc_id_tpc1_f(u32 v) +{ + return (v & 0xfU) << 8U; +} +static inline u32 gr_cwd_sm_id_r(u32 i) +{ + return 0x00405ba0U + i*4U; +} +static inline u32 gr_cwd_sm_id__size_1_v(void) +{ + return 0x00000010U; +} +static inline u32 gr_cwd_sm_id_tpc0_f(u32 v) +{ + return (v & 0xffU) << 0U; +} +static inline u32 gr_cwd_sm_id_tpc1_f(u32 v) +{ + return (v & 0xffU) << 8U; +} +static inline u32 gr_gpc0_fs_gpc_r(void) +{ + return 0x00502608U; +} +static inline u32 gr_gpc0_fs_gpc_num_available_tpcs_v(u32 r) +{ + return (r >> 0U) & 0x1fU; +} +static inline u32 gr_gpc0_fs_gpc_num_available_zculls_v(u32 r) +{ + return (r >> 16U) & 0x1fU; +} +static inline u32 gr_gpc0_cfg_r(void) +{ + return 0x00502620U; +} +static inline u32 gr_gpc0_cfg_imem_sz_v(u32 r) +{ + return (r >> 0U) & 0xffU; +} +static inline u32 gr_gpccs_rc_lanes_r(void) +{ + return 0x00502880U; +} +static inline u32 gr_gpccs_rc_lanes_num_chains_s(void) +{ + return 6U; +} +static inline u32 gr_gpccs_rc_lanes_num_chains_f(u32 v) +{ + return (v & 0x3fU) << 
0U; +} +static inline u32 gr_gpccs_rc_lanes_num_chains_m(void) +{ + return 0x3fU << 0U; +} +static inline u32 gr_gpccs_rc_lanes_num_chains_v(u32 r) +{ + return (r >> 0U) & 0x3fU; +} +static inline u32 gr_gpccs_rc_lane_size_r(void) +{ + return 0x00502910U; +} +static inline u32 gr_gpccs_rc_lane_size_v_s(void) +{ + return 24U; +} +static inline u32 gr_gpccs_rc_lane_size_v_f(u32 v) +{ + return (v & 0xffffffU) << 0U; +} +static inline u32 gr_gpccs_rc_lane_size_v_m(void) +{ + return 0xffffffU << 0U; +} +static inline u32 gr_gpccs_rc_lane_size_v_v(u32 r) +{ + return (r >> 0U) & 0xffffffU; +} +static inline u32 gr_gpccs_rc_lane_size_v_0_v(void) +{ + return 0x00000000U; +} +static inline u32 gr_gpccs_rc_lane_size_v_0_f(void) +{ + return 0x0U; +} +static inline u32 gr_gpc0_zcull_fs_r(void) +{ + return 0x00500910U; +} +static inline u32 gr_gpc0_zcull_fs_num_sms_f(u32 v) +{ + return (v & 0x1ffU) << 0U; +} +static inline u32 gr_gpc0_zcull_fs_num_active_banks_f(u32 v) +{ + return (v & 0xfU) << 16U; +} +static inline u32 gr_gpc0_zcull_ram_addr_r(void) +{ + return 0x00500914U; +} +static inline u32 gr_gpc0_zcull_ram_addr_tiles_per_hypertile_row_per_gpc_f(u32 v) +{ + return (v & 0xfU) << 0U; +} +static inline u32 gr_gpc0_zcull_ram_addr_row_offset_f(u32 v) +{ + return (v & 0xfU) << 8U; +} +static inline u32 gr_gpc0_zcull_sm_num_rcp_r(void) +{ + return 0x00500918U; +} +static inline u32 gr_gpc0_zcull_sm_num_rcp_conservative_f(u32 v) +{ + return (v & 0xffffffU) << 0U; +} +static inline u32 gr_gpc0_zcull_sm_num_rcp_conservative__max_v(void) +{ + return 0x00800000U; +} +static inline u32 gr_gpc0_zcull_total_ram_size_r(void) +{ + return 0x00500920U; +} +static inline u32 gr_gpc0_zcull_total_ram_size_num_aliquots_f(u32 v) +{ + return (v & 0xffffU) << 0U; +} +static inline u32 gr_gpc0_zcull_zcsize_r(u32 i) +{ + return 0x00500a04U + i*32U; +} +static inline u32 gr_gpc0_zcull_zcsize_height_subregion__multiple_v(void) +{ + return 0x00000040U; +} +static inline u32 
gr_gpc0_zcull_zcsize_width_subregion__multiple_v(void) +{ + return 0x00000010U; +} +static inline u32 gr_gpc0_gpm_pd_sm_id_r(u32 i) +{ + return 0x00500c10U + i*4U; +} +static inline u32 gr_gpc0_gpm_pd_sm_id_id_f(u32 v) +{ + return (v & 0xffU) << 0U; +} +static inline u32 gr_gpc0_gpm_pd_pes_tpc_id_mask_r(u32 i) +{ + return 0x00500c30U + i*4U; +} +static inline u32 gr_gpc0_gpm_pd_pes_tpc_id_mask_mask_v(u32 r) +{ + return (r >> 0U) & 0xffU; +} +static inline u32 gr_gpc0_tpc0_pe_cfg_smid_r(void) +{ + return 0x00504088U; +} +static inline u32 gr_gpc0_tpc0_pe_cfg_smid_value_f(u32 v) +{ + return (v & 0xffffU) << 0U; +} +static inline u32 gr_gpc0_tpc0_sm_cfg_r(void) +{ + return 0x00504608U; +} +static inline u32 gr_gpc0_tpc0_sm_cfg_tpc_id_f(u32 v) +{ + return (v & 0xffffU) << 0U; +} +static inline u32 gr_gpc0_tpc0_sm_cfg_tpc_id_v(u32 r) +{ + return (r >> 0U) & 0xffffU; +} +static inline u32 gr_gpc0_tpc0_sm_arch_r(void) +{ + return 0x00504330U; +} +static inline u32 gr_gpc0_tpc0_sm_arch_warp_count_v(u32 r) +{ + return (r >> 0U) & 0xffU; +} +static inline u32 gr_gpc0_tpc0_sm_arch_spa_version_v(u32 r) +{ + return (r >> 8U) & 0xfffU; +} +static inline u32 gr_gpc0_tpc0_sm_arch_sm_version_v(u32 r) +{ + return (r >> 20U) & 0xfffU; +} +static inline u32 gr_gpc0_ppc0_pes_vsc_strem_r(void) +{ + return 0x00503018U; +} +static inline u32 gr_gpc0_ppc0_pes_vsc_strem_master_pe_m(void) +{ + return 0x1U << 0U; +} +static inline u32 gr_gpc0_ppc0_pes_vsc_strem_master_pe_true_f(void) +{ + return 0x1U; +} +static inline u32 gr_gpc0_ppc0_cbm_beta_cb_size_r(void) +{ + return 0x005030c0U; +} +static inline u32 gr_gpc0_ppc0_cbm_beta_cb_size_v_f(u32 v) +{ + return (v & 0x3fffffU) << 0U; +} +static inline u32 gr_gpc0_ppc0_cbm_beta_cb_size_v_m(void) +{ + return 0x3fffffU << 0U; +} +static inline u32 gr_gpc0_ppc0_cbm_beta_cb_size_v_default_v(void) +{ + return 0x00000800U; +} +static inline u32 gr_gpc0_ppc0_cbm_beta_cb_size_v_gfxp_v(void) +{ + return 0x00001100U; +} +static inline u32 
gr_gpc0_ppc0_cbm_beta_cb_size_v_granularity_v(void) +{ + return 0x00000020U; +} +static inline u32 gr_gpc0_ppc0_cbm_beta_cb_offset_r(void) +{ + return 0x005030f4U; +} +static inline u32 gr_gpc0_ppc0_cbm_alpha_cb_size_r(void) +{ + return 0x005030e4U; +} +static inline u32 gr_gpc0_ppc0_cbm_alpha_cb_size_v_f(u32 v) +{ + return (v & 0xffffU) << 0U; +} +static inline u32 gr_gpc0_ppc0_cbm_alpha_cb_size_v_m(void) +{ + return 0xffffU << 0U; +} +static inline u32 gr_gpc0_ppc0_cbm_alpha_cb_size_v_default_v(void) +{ + return 0x00000800U; +} +static inline u32 gr_gpc0_ppc0_cbm_alpha_cb_size_v_granularity_v(void) +{ + return 0x00000020U; +} +static inline u32 gr_gpc0_ppc0_cbm_alpha_cb_offset_r(void) +{ + return 0x005030f8U; +} +static inline u32 gr_gpc0_ppc0_cbm_beta_steady_state_cb_size_r(void) +{ + return 0x005030f0U; +} +static inline u32 gr_gpc0_ppc0_cbm_beta_steady_state_cb_size_v_f(u32 v) +{ + return (v & 0x3fffffU) << 0U; +} +static inline u32 gr_gpc0_ppc0_cbm_beta_steady_state_cb_size_v_default_v(void) +{ + return 0x00000800U; +} +static inline u32 gr_gpcs_tpcs_tex_rm_cb_0_r(void) +{ + return 0x00419e00U; +} +static inline u32 gr_gpcs_tpcs_tex_rm_cb_0_base_addr_43_12_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_r(void) +{ + return 0x00419e04U; +} +static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_size_div_128b_s(void) +{ + return 21U; +} +static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_size_div_128b_f(u32 v) +{ + return (v & 0x1fffffU) << 0U; +} +static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_size_div_128b_m(void) +{ + return 0x1fffffU << 0U; +} +static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_size_div_128b_v(u32 r) +{ + return (r >> 0U) & 0x1fffffU; +} +static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_size_div_128b_granularity_f(void) +{ + return 0x80U; +} +static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_valid_s(void) +{ + return 1U; +} +static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_valid_f(u32 v) +{ + return (v & 0x1U) << 31U; +} +static inline u32 
gr_gpcs_tpcs_tex_rm_cb_1_valid_m(void) +{ + return 0x1U << 31U; +} +static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_valid_v(u32 r) +{ + return (r >> 31U) & 0x1U; +} +static inline u32 gr_gpcs_tpcs_tex_rm_cb_1_valid_true_f(void) +{ + return 0x80000000U; +} +static inline u32 gr_gpccs_falcon_addr_r(void) +{ + return 0x0041a0acU; +} +static inline u32 gr_gpccs_falcon_addr_lsb_s(void) +{ + return 6U; +} +static inline u32 gr_gpccs_falcon_addr_lsb_f(u32 v) +{ + return (v & 0x3fU) << 0U; +} +static inline u32 gr_gpccs_falcon_addr_lsb_m(void) +{ + return 0x3fU << 0U; +} +static inline u32 gr_gpccs_falcon_addr_lsb_v(u32 r) +{ + return (r >> 0U) & 0x3fU; +} +static inline u32 gr_gpccs_falcon_addr_lsb_init_v(void) +{ + return 0x00000000U; +} +static inline u32 gr_gpccs_falcon_addr_lsb_init_f(void) +{ + return 0x0U; +} +static inline u32 gr_gpccs_falcon_addr_msb_s(void) +{ + return 6U; +} +static inline u32 gr_gpccs_falcon_addr_msb_f(u32 v) +{ + return (v & 0x3fU) << 6U; +} +static inline u32 gr_gpccs_falcon_addr_msb_m(void) +{ + return 0x3fU << 6U; +} +static inline u32 gr_gpccs_falcon_addr_msb_v(u32 r) +{ + return (r >> 6U) & 0x3fU; +} +static inline u32 gr_gpccs_falcon_addr_msb_init_v(void) +{ + return 0x00000000U; +} +static inline u32 gr_gpccs_falcon_addr_msb_init_f(void) +{ + return 0x0U; +} +static inline u32 gr_gpccs_falcon_addr_ext_s(void) +{ + return 12U; +} +static inline u32 gr_gpccs_falcon_addr_ext_f(u32 v) +{ + return (v & 0xfffU) << 0U; +} +static inline u32 gr_gpccs_falcon_addr_ext_m(void) +{ + return 0xfffU << 0U; +} +static inline u32 gr_gpccs_falcon_addr_ext_v(u32 r) +{ + return (r >> 0U) & 0xfffU; +} +static inline u32 gr_gpccs_cpuctl_r(void) +{ + return 0x0041a100U; +} +static inline u32 gr_gpccs_cpuctl_startcpu_f(u32 v) +{ + return (v & 0x1U) << 1U; +} +static inline u32 gr_gpccs_dmactl_r(void) +{ + return 0x0041a10cU; +} +static inline u32 gr_gpccs_dmactl_require_ctx_f(u32 v) +{ + return (v & 0x1U) << 0U; +} +static inline u32 
gr_gpccs_dmactl_dmem_scrubbing_m(void) +{ + return 0x1U << 1U; +} +static inline u32 gr_gpccs_dmactl_imem_scrubbing_m(void) +{ + return 0x1U << 2U; +} +static inline u32 gr_gpccs_imemc_r(u32 i) +{ + return 0x0041a180U + i*16U; +} +static inline u32 gr_gpccs_imemc_offs_f(u32 v) +{ + return (v & 0x3fU) << 2U; +} +static inline u32 gr_gpccs_imemc_blk_f(u32 v) +{ + return (v & 0xffU) << 8U; +} +static inline u32 gr_gpccs_imemc_aincw_f(u32 v) +{ + return (v & 0x1U) << 24U; +} +static inline u32 gr_gpccs_imemd_r(u32 i) +{ + return 0x0041a184U + i*16U; +} +static inline u32 gr_gpccs_imemt_r(u32 i) +{ + return 0x0041a188U + i*16U; +} +static inline u32 gr_gpccs_imemt__size_1_v(void) +{ + return 0x00000004U; +} +static inline u32 gr_gpccs_imemt_tag_f(u32 v) +{ + return (v & 0xffffU) << 0U; +} +static inline u32 gr_gpccs_dmemc_r(u32 i) +{ + return 0x0041a1c0U + i*8U; +} +static inline u32 gr_gpccs_dmemc_offs_f(u32 v) +{ + return (v & 0x3fU) << 2U; +} +static inline u32 gr_gpccs_dmemc_blk_f(u32 v) +{ + return (v & 0xffU) << 8U; +} +static inline u32 gr_gpccs_dmemc_aincw_f(u32 v) +{ + return (v & 0x1U) << 24U; +} +static inline u32 gr_gpccs_dmemd_r(u32 i) +{ + return 0x0041a1c4U + i*8U; +} +static inline u32 gr_gpccs_ctxsw_mailbox_r(u32 i) +{ + return 0x0041a800U + i*4U; +} +static inline u32 gr_gpccs_ctxsw_mailbox_value_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 gr_gpcs_swdx_bundle_cb_base_r(void) +{ + return 0x00418e24U; +} +static inline u32 gr_gpcs_swdx_bundle_cb_base_addr_39_8_s(void) +{ + return 32U; +} +static inline u32 gr_gpcs_swdx_bundle_cb_base_addr_39_8_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 gr_gpcs_swdx_bundle_cb_base_addr_39_8_m(void) +{ + return 0xffffffffU << 0U; +} +static inline u32 gr_gpcs_swdx_bundle_cb_base_addr_39_8_v(u32 r) +{ + return (r >> 0U) & 0xffffffffU; +} +static inline u32 gr_gpcs_swdx_bundle_cb_base_addr_39_8_init_v(void) +{ + return 0x00000000U; +} +static inline u32 
gr_gpcs_swdx_bundle_cb_base_addr_39_8_init_f(void) +{ + return 0x0U; +} +static inline u32 gr_gpcs_swdx_bundle_cb_size_r(void) +{ + return 0x00418e28U; +} +static inline u32 gr_gpcs_swdx_bundle_cb_size_div_256b_s(void) +{ + return 11U; +} +static inline u32 gr_gpcs_swdx_bundle_cb_size_div_256b_f(u32 v) +{ + return (v & 0x7ffU) << 0U; +} +static inline u32 gr_gpcs_swdx_bundle_cb_size_div_256b_m(void) +{ + return 0x7ffU << 0U; +} +static inline u32 gr_gpcs_swdx_bundle_cb_size_div_256b_v(u32 r) +{ + return (r >> 0U) & 0x7ffU; +} +static inline u32 gr_gpcs_swdx_bundle_cb_size_div_256b_init_v(void) +{ + return 0x00000030U; +} +static inline u32 gr_gpcs_swdx_bundle_cb_size_div_256b_init_f(void) +{ + return 0x30U; +} +static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_s(void) +{ + return 1U; +} +static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_f(u32 v) +{ + return (v & 0x1U) << 31U; +} +static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_m(void) +{ + return 0x1U << 31U; +} +static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_v(u32 r) +{ + return (r >> 31U) & 0x1U; +} +static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_false_v(void) +{ + return 0x00000000U; +} +static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_false_f(void) +{ + return 0x0U; +} +static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_true_v(void) +{ + return 0x00000001U; +} +static inline u32 gr_gpcs_swdx_bundle_cb_size_valid_true_f(void) +{ + return 0x80000000U; +} +static inline u32 gr_gpc0_swdx_rm_spill_buffer_size_r(void) +{ + return 0x005001dcU; +} +static inline u32 gr_gpc0_swdx_rm_spill_buffer_size_256b_f(u32 v) +{ + return (v & 0xffffU) << 0U; +} +static inline u32 gr_gpc0_swdx_rm_spill_buffer_size_256b_default_v(void) +{ + return 0x00000170U; +} +static inline u32 gr_gpc0_swdx_rm_spill_buffer_size_256b_byte_granularity_v(void) +{ + return 0x00000100U; +} +static inline u32 gr_gpc0_swdx_rm_spill_buffer_addr_r(void) +{ + return 0x005001d8U; +} +static inline u32 
gr_gpc0_swdx_rm_spill_buffer_addr_39_8_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 gr_gpc0_swdx_rm_spill_buffer_addr_39_8_align_bits_v(void) +{ + return 0x00000008U; +} +static inline u32 gr_gpcs_swdx_beta_cb_ctrl_r(void) +{ + return 0x004181e4U; +} +static inline u32 gr_gpcs_swdx_beta_cb_ctrl_cbes_reserve_f(u32 v) +{ + return (v & 0xfffU) << 0U; +} +static inline u32 gr_gpcs_swdx_beta_cb_ctrl_cbes_reserve_gfxp_v(void) +{ + return 0x00000100U; +} +static inline u32 gr_gpcs_ppcs_cbm_beta_cb_ctrl_r(void) +{ + return 0x0041befcU; +} +static inline u32 gr_gpcs_ppcs_cbm_beta_cb_ctrl_cbes_reserve_f(u32 v) +{ + return (v & 0xfffU) << 0U; +} +static inline u32 gr_gpcs_swdx_tc_beta_cb_size_r(u32 i) +{ + return 0x00418ea0U + i*4U; +} +static inline u32 gr_gpcs_swdx_tc_beta_cb_size_v_f(u32 v) +{ + return (v & 0x3fffffU) << 0U; +} +static inline u32 gr_gpcs_swdx_tc_beta_cb_size_v_m(void) +{ + return 0x3fffffU << 0U; +} +static inline u32 gr_gpcs_swdx_dss_zbc_color_r_r(u32 i) +{ + return 0x00418010U + i*4U; +} +static inline u32 gr_gpcs_swdx_dss_zbc_color_r_val_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 gr_gpcs_swdx_dss_zbc_color_g_r(u32 i) +{ + return 0x0041804cU + i*4U; +} +static inline u32 gr_gpcs_swdx_dss_zbc_color_g_val_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 gr_gpcs_swdx_dss_zbc_color_b_r(u32 i) +{ + return 0x00418088U + i*4U; +} +static inline u32 gr_gpcs_swdx_dss_zbc_color_b_val_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 gr_gpcs_swdx_dss_zbc_color_a_r(u32 i) +{ + return 0x004180c4U + i*4U; +} +static inline u32 gr_gpcs_swdx_dss_zbc_color_a_val_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 gr_gpcs_swdx_dss_zbc_c_01_to_04_format_r(void) +{ + return 0x00418100U; +} +static inline u32 gr_gpcs_swdx_dss_zbc_z_r(u32 i) +{ + return 0x00418110U + i*4U; +} +static inline u32 gr_gpcs_swdx_dss_zbc_z_val_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static 
inline u32 gr_gpcs_swdx_dss_zbc_z_01_to_04_format_r(void) +{ + return 0x0041814cU; +} +static inline u32 gr_gpcs_swdx_dss_zbc_s_r(u32 i) +{ + return 0x0041815cU + i*4U; +} +static inline u32 gr_gpcs_swdx_dss_zbc_s_val_f(u32 v) +{ + return (v & 0xffU) << 0U; +} +static inline u32 gr_gpcs_swdx_dss_zbc_s_01_to_04_format_r(void) +{ + return 0x00418198U; +} +static inline u32 gr_gpcs_setup_attrib_cb_base_r(void) +{ + return 0x00418810U; +} +static inline u32 gr_gpcs_setup_attrib_cb_base_addr_39_12_f(u32 v) +{ + return (v & 0xfffffffU) << 0U; +} +static inline u32 gr_gpcs_setup_attrib_cb_base_addr_39_12_align_bits_v(void) +{ + return 0x0000000cU; +} +static inline u32 gr_gpcs_setup_attrib_cb_base_valid_true_f(void) +{ + return 0x80000000U; +} +static inline u32 gr_crstr_gpc_map_r(u32 i) +{ + return 0x00418b08U + i*4U; +} +static inline u32 gr_crstr_gpc_map_tile0_f(u32 v) +{ + return (v & 0x1fU) << 0U; +} +static inline u32 gr_crstr_gpc_map_tile1_f(u32 v) +{ + return (v & 0x1fU) << 5U; +} +static inline u32 gr_crstr_gpc_map_tile2_f(u32 v) +{ + return (v & 0x1fU) << 10U; +} +static inline u32 gr_crstr_gpc_map_tile3_f(u32 v) +{ + return (v & 0x1fU) << 15U; +} +static inline u32 gr_crstr_gpc_map_tile4_f(u32 v) +{ + return (v & 0x1fU) << 20U; +} +static inline u32 gr_crstr_gpc_map_tile5_f(u32 v) +{ + return (v & 0x1fU) << 25U; +} +static inline u32 gr_crstr_map_table_cfg_r(void) +{ + return 0x00418bb8U; +} +static inline u32 gr_crstr_map_table_cfg_row_offset_f(u32 v) +{ + return (v & 0xffU) << 0U; +} +static inline u32 gr_crstr_map_table_cfg_num_entries_f(u32 v) +{ + return (v & 0xffU) << 8U; +} +static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map_r(u32 i) +{ + return 0x00418980U + i*4U; +} +static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map_tile_0_f(u32 v) +{ + return (v & 0x7U) << 0U; +} +static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map_tile_1_f(u32 v) +{ + return (v & 0x7U) << 4U; +} +static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map_tile_2_f(u32 v) +{ + return 
(v & 0x7U) << 8U; +} +static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map_tile_3_f(u32 v) +{ + return (v & 0x7U) << 12U; +} +static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map_tile_4_f(u32 v) +{ + return (v & 0x7U) << 16U; +} +static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map_tile_5_f(u32 v) +{ + return (v & 0x7U) << 20U; +} +static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map_tile_6_f(u32 v) +{ + return (v & 0x7U) << 24U; +} +static inline u32 gr_gpcs_zcull_sm_in_gpc_number_map_tile_7_f(u32 v) +{ + return (v & 0x7U) << 28U; +} +static inline u32 gr_gpcs_gpm_pd_cfg_r(void) +{ + return 0x00418c6cU; +} +static inline u32 gr_gpcs_gcc_pagepool_base_r(void) +{ + return 0x00419004U; +} +static inline u32 gr_gpcs_gcc_pagepool_base_addr_39_8_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 gr_gpcs_gcc_pagepool_r(void) +{ + return 0x00419008U; +} +static inline u32 gr_gpcs_gcc_pagepool_total_pages_f(u32 v) +{ + return (v & 0x3ffU) << 0U; +} +static inline u32 gr_gpcs_tpcs_pe_vaf_r(void) +{ + return 0x0041980cU; +} +static inline u32 gr_gpcs_tpcs_pe_vaf_fast_mode_switch_true_f(void) +{ + return 0x10U; +} +static inline u32 gr_gpcs_tpcs_pe_pin_cb_global_base_addr_r(void) +{ + return 0x00419848U; +} +static inline u32 gr_gpcs_tpcs_pe_pin_cb_global_base_addr_v_f(u32 v) +{ + return (v & 0xfffffffU) << 0U; +} +static inline u32 gr_gpcs_tpcs_pe_pin_cb_global_base_addr_valid_f(u32 v) +{ + return (v & 0x1U) << 28U; +} +static inline u32 gr_gpcs_tpcs_pe_pin_cb_global_base_addr_valid_true_f(void) +{ + return 0x10000000U; +} +static inline u32 gr_gpcs_tpcs_mpc_vtg_debug_r(void) +{ + return 0x00419c00U; +} +static inline u32 gr_gpcs_tpcs_mpc_vtg_debug_timeslice_mode_disabled_f(void) +{ + return 0x0U; +} +static inline u32 gr_gpcs_tpcs_mpc_vtg_debug_timeslice_mode_enabled_f(void) +{ + return 0x8U; +} +static inline u32 gr_gpcs_tpcs_mpc_vtg_cb_global_base_addr_r(void) +{ + return 0x00419c2cU; +} +static inline u32 gr_gpcs_tpcs_mpc_vtg_cb_global_base_addr_v_f(u32 
v) +{ + return (v & 0xfffffffU) << 0U; +} +static inline u32 gr_gpcs_tpcs_mpc_vtg_cb_global_base_addr_valid_f(u32 v) +{ + return (v & 0x1U) << 28U; +} +static inline u32 gr_gpcs_tpcs_mpc_vtg_cb_global_base_addr_valid_true_f(void) +{ + return 0x10000000U; +} +static inline u32 gr_gpcs_tpcs_sms_hww_warp_esr_report_mask_r(void) +{ + return 0x00419ea8U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_r(void) +{ + return 0x00504728U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_stack_error_report_f(void) +{ + return 0x2U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_api_stack_error_report_f(void) +{ + return 0x4U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_pc_wrap_report_f(void) +{ + return 0x10U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_misaligned_pc_report_f(void) +{ + return 0x20U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_pc_overflow_report_f(void) +{ + return 0x40U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_misaligned_reg_report_f(void) +{ + return 0x100U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_illegal_instr_encoding_report_f(void) +{ + return 0x200U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_illegal_instr_param_report_f(void) +{ + return 0x800U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_oor_reg_report_f(void) +{ + return 0x2000U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_oor_addr_report_f(void) +{ + return 0x4000U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_misaligned_addr_report_f(void) +{ + return 0x8000U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_invalid_addr_space_report_f(void) +{ + return 0x10000U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_invalid_const_addr_ldc_report_f(void) +{ + return 0x40000U; +} +static inline u32 
gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_mmu_fault_report_f(void) +{ + return 0x800000U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_report_mask_stack_overflow_report_f(void) +{ + return 0x400000U; +} +static inline u32 gr_gpcs_tpcs_tpccs_tpc_exception_en_r(void) +{ + return 0x00419d0cU; +} +static inline u32 gr_gpcs_tpcs_tpccs_tpc_exception_en_sm_enabled_f(void) +{ + return 0x2U; +} +static inline u32 gr_gpcs_tpcs_tpccs_tpc_exception_en_tex_enabled_f(void) +{ + return 0x1U; +} +static inline u32 gr_gpcs_tpcs_tpccs_tpc_exception_en_mpc_enabled_f(void) +{ + return 0x10U; +} +static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_en_r(void) +{ + return 0x0050450cU; +} +static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_en_sm_v(u32 r) +{ + return (r >> 1U) & 0x1U; +} +static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_en_sm_enabled_f(void) +{ + return 0x2U; +} +static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_en_mpc_enabled_f(void) +{ + return 0x10U; +} +static inline u32 gr_gpcs_gpccs_gpc_exception_en_r(void) +{ + return 0x0041ac94U; +} +static inline u32 gr_gpcs_gpccs_gpc_exception_en_gcc_f(u32 v) +{ + return (v & 0x1U) << 2U; +} +static inline u32 gr_gpcs_gpccs_gpc_exception_en_tpc_f(u32 v) +{ + return (v & 0xffU) << 16U; +} +static inline u32 gr_gpcs_gpccs_gpc_exception_en_gpccs_f(u32 v) +{ + return (v & 0x1U) << 14U; +} +static inline u32 gr_gpcs_gpccs_gpc_exception_en_gpcmmu_f(u32 v) +{ + return (v & 0x1U) << 15U; +} +static inline u32 gr_gpc0_gpccs_gpc_exception_r(void) +{ + return 0x00502c90U; +} +static inline u32 gr_gpc0_gpccs_gpc_exception_gcc_v(u32 r) +{ + return (r >> 2U) & 0x1U; +} +static inline u32 gr_gpc0_gpccs_gpc_exception_tpc_v(u32 r) +{ + return (r >> 16U) & 0xffU; +} +static inline u32 gr_gpc0_gpccs_gpc_exception_tpc_0_pending_v(void) +{ + return 0x00000001U; +} +static inline u32 gr_gpc0_gpccs_gpc_exception_gpccs_f(u32 v) +{ + return (v & 0x1U) << 14U; +} +static inline u32 gr_gpc0_gpccs_gpc_exception_gpccs_m(void) +{ + return 0x1U << 
14U; +} +static inline u32 gr_gpc0_gpccs_gpc_exception_gpccs_pending_f(void) +{ + return 0x4000U; +} +static inline u32 gr_gpc0_gpccs_gpc_exception_gpcmmu_f(u32 v) +{ + return (v & 0x1U) << 15U; +} +static inline u32 gr_gpc0_gpccs_gpc_exception_gpcmmu_m(void) +{ + return 0x1U << 15U; +} +static inline u32 gr_gpc0_gpccs_gpc_exception_gpcmmu_pending_f(void) +{ + return 0x8000U; +} +static inline u32 gr_pri_gpc0_gcc_l15_ecc_status_r(void) +{ + return 0x00501048U; +} +static inline u32 gr_pri_gpc0_gcc_l15_ecc_status_corrected_err_bank0_m(void) +{ + return 0x1U << 0U; +} +static inline u32 gr_pri_gpc0_gcc_l15_ecc_status_corrected_err_bank1_m(void) +{ + return 0x1U << 1U; +} +static inline u32 gr_pri_gpc0_gcc_l15_ecc_status_uncorrected_err_bank0_m(void) +{ + return 0x1U << 4U; +} +static inline u32 gr_pri_gpc0_gcc_l15_ecc_status_uncorrected_err_bank1_m(void) +{ + return 0x1U << 5U; +} +static inline u32 gr_pri_gpc0_gcc_l15_ecc_status_corrected_err_total_counter_overflow_v(u32 r) +{ + return (r >> 8U) & 0x1U; +} +static inline u32 gr_pri_gpc0_gcc_l15_ecc_status_uncorrected_err_total_counter_overflow_v(u32 r) +{ + return (r >> 10U) & 0x1U; +} +static inline u32 gr_pri_gpc0_gcc_l15_ecc_status_reset_task_f(void) +{ + return 0x40000000U; +} +static inline u32 gr_pri_gpc0_gcc_l15_ecc_corrected_err_count_r(void) +{ + return 0x0050104cU; +} +static inline u32 gr_pri_gpc0_gcc_l15_ecc_corrected_err_count_total_s(void) +{ + return 16U; +} +static inline u32 gr_pri_gpc0_gcc_l15_ecc_corrected_err_count_total_v(u32 r) +{ + return (r >> 0U) & 0xffffU; +} +static inline u32 gr_pri_gpc0_gcc_l15_ecc_uncorrected_err_count_r(void) +{ + return 0x00501054U; +} +static inline u32 gr_pri_gpc0_gcc_l15_ecc_uncorrected_err_count_total_s(void) +{ + return 16U; +} +static inline u32 gr_pri_gpc0_gcc_l15_ecc_uncorrected_err_count_total_v(u32 r) +{ + return (r >> 0U) & 0xffffU; +} +static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_r(void) +{ + return 0x00504508U; +} +static inline u32 
gr_gpc0_tpc0_tpccs_tpc_exception_tex_v(u32 r) +{ + return (r >> 0U) & 0x1U; +} +static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_tex_pending_v(void) +{ + return 0x00000001U; +} +static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_sm_v(u32 r) +{ + return (r >> 1U) & 0x1U; +} +static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_sm_pending_v(void) +{ + return 0x00000001U; +} +static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_mpc_m(void) +{ + return 0x1U << 4U; +} +static inline u32 gr_gpc0_tpc0_tpccs_tpc_exception_mpc_pending_f(void) +{ + return 0x10U; +} +static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_r(void) +{ + return 0x00504704U; +} +static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_debugger_mode_m(void) +{ + return 0x1U << 0U; +} +static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_debugger_mode_v(u32 r) +{ + return (r >> 0U) & 0x1U; +} +static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_debugger_mode_on_v(void) +{ + return 0x00000001U; +} +static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_debugger_mode_on_f(void) +{ + return 0x1U; +} +static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_debugger_mode_off_v(void) +{ + return 0x00000000U; +} +static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_debugger_mode_off_f(void) +{ + return 0x0U; +} +static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_stop_trigger_m(void) +{ + return 0x1U << 31U; +} +static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_stop_trigger_enable_f(void) +{ + return 0x80000000U; +} +static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_stop_trigger_disable_f(void) +{ + return 0x0U; +} +static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_single_step_mode_m(void) +{ + return 0x1U << 3U; +} +static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_single_step_mode_enable_f(void) +{ + return 0x8U; +} +static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_single_step_mode_disable_f(void) +{ + return 0x0U; +} +static inline u32 gr_gpc0_tpc0_sm0_dbgr_control0_run_trigger_task_f(void) +{ + return 0x40000000U; +} +static inline u32 
gr_gpc0_tpc0_sm0_warp_valid_mask_0_r(void) +{ + return 0x00504708U; +} +static inline u32 gr_gpc0_tpc0_sm0_warp_valid_mask_1_r(void) +{ + return 0x0050470cU; +} +static inline u32 gr_gpc0_tpc0_sm0_dbgr_bpt_pause_mask_0_r(void) +{ + return 0x00504710U; +} +static inline u32 gr_gpc0_tpc0_sm0_dbgr_bpt_pause_mask_1_r(void) +{ + return 0x00504714U; +} +static inline u32 gr_gpc0_tpc0_sm0_dbgr_bpt_trap_mask_0_r(void) +{ + return 0x00504718U; +} +static inline u32 gr_gpc0_tpc0_sm0_dbgr_bpt_trap_mask_1_r(void) +{ + return 0x0050471cU; +} +static inline u32 gr_gpcs_tpcs_sms_dbgr_bpt_pause_mask_0_r(void) +{ + return 0x00419e90U; +} +static inline u32 gr_gpcs_tpcs_sms_dbgr_bpt_pause_mask_1_r(void) +{ + return 0x00419e94U; +} +static inline u32 gr_gpcs_tpcs_sms_dbgr_status0_r(void) +{ + return 0x00419e80U; +} +static inline u32 gr_gpc0_tpc0_sm0_dbgr_status0_r(void) +{ + return 0x00504700U; +} +static inline u32 gr_gpc0_tpc0_sm0_dbgr_status0_sm_in_trap_mode_v(u32 r) +{ + return (r >> 0U) & 0x1U; +} +static inline u32 gr_gpc0_tpc0_sm0_dbgr_status0_locked_down_v(u32 r) +{ + return (r >> 4U) & 0x1U; +} +static inline u32 gr_gpc0_tpc0_sm0_dbgr_status0_locked_down_true_v(void) +{ + return 0x00000001U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_r(void) +{ + return 0x00504730U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_error_v(u32 r) +{ + return (r >> 0U) & 0xffffU; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_error_none_v(void) +{ + return 0x00000000U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_error_none_f(void) +{ + return 0x0U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_wrap_id_m(void) +{ + return 0xffU << 16U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_addr_error_type_m(void) +{ + return 0xfU << 24U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_addr_error_type_none_f(void) +{ + return 0x0U; +} +static inline u32 gr_gpc0_tpc0_sm_tpc_esr_sm_sel_r(void) +{ + return 0x0050460cU; +} +static inline u32 
gr_gpc0_tpc0_sm_tpc_esr_sm_sel_sm0_error_v(u32 r) +{ + return (r >> 0U) & 0x1U; +} +static inline u32 gr_gpc0_tpc0_sm_tpc_esr_sm_sel_sm1_error_v(u32 r) +{ + return (r >> 1U) & 0x1U; +} +static inline u32 gr_gpc0_tpc0_sm0_hww_warp_esr_pc_r(void) +{ + return 0x00504738U; +} +static inline u32 gr_gpc0_tpc0_sm_halfctl_ctrl_r(void) +{ + return 0x005043a0U; +} +static inline u32 gr_gpcs_tpcs_sm_halfctl_ctrl_r(void) +{ + return 0x00419ba0U; +} +static inline u32 gr_gpcs_tpcs_sm_halfctl_ctrl_sctl_read_quad_ctl_m(void) +{ + return 0x1U << 4U; +} +static inline u32 gr_gpcs_tpcs_sm_halfctl_ctrl_sctl_read_quad_ctl_f(u32 v) +{ + return (v & 0x1U) << 4U; +} +static inline u32 gr_gpc0_tpc0_sm_debug_sfe_control_r(void) +{ + return 0x005043b0U; +} +static inline u32 gr_gpcs_tpcs_sm_debug_sfe_control_r(void) +{ + return 0x00419bb0U; +} +static inline u32 gr_gpcs_tpcs_sm_debug_sfe_control_read_half_ctl_m(void) +{ + return 0x1U << 0U; +} +static inline u32 gr_gpcs_tpcs_sm_debug_sfe_control_read_half_ctl_f(u32 v) +{ + return (v & 0x1U) << 0U; +} +static inline u32 gr_gpcs_tpcs_pes_vsc_vpc_r(void) +{ + return 0x0041be08U; +} +static inline u32 gr_gpcs_tpcs_pes_vsc_vpc_fast_mode_switch_true_f(void) +{ + return 0x4U; +} +static inline u32 gr_ppcs_wwdx_map_gpc_map_r(u32 i) +{ + return 0x0041bf00U + i*4U; +} +static inline u32 gr_ppcs_wwdx_map_table_cfg_r(void) +{ + return 0x0041bfd0U; +} +static inline u32 gr_ppcs_wwdx_map_table_cfg_row_offset_f(u32 v) +{ + return (v & 0xffU) << 0U; +} +static inline u32 gr_ppcs_wwdx_map_table_cfg_num_entries_f(u32 v) +{ + return (v & 0xffU) << 8U; +} +static inline u32 gr_ppcs_wwdx_map_table_cfg_normalized_num_entries_f(u32 v) +{ + return (v & 0x1fU) << 16U; +} +static inline u32 gr_ppcs_wwdx_map_table_cfg_normalized_shift_value_f(u32 v) +{ + return (v & 0x7U) << 21U; +} +static inline u32 gr_gpcs_ppcs_wwdx_sm_num_rcp_r(void) +{ + return 0x0041bfd4U; +} +static inline u32 gr_gpcs_ppcs_wwdx_sm_num_rcp_conservative_f(u32 v) +{ + return (v & 0xffffffU) << 
0U; +} +static inline u32 gr_ppcs_wwdx_map_table_cfg_coeff_r(u32 i) +{ + return 0x0041bfb0U + i*4U; +} +static inline u32 gr_ppcs_wwdx_map_table_cfg_coeff__size_1_v(void) +{ + return 0x00000005U; +} +static inline u32 gr_ppcs_wwdx_map_table_cfg_coeff_0_mod_value_f(u32 v) +{ + return (v & 0xffU) << 0U; +} +static inline u32 gr_ppcs_wwdx_map_table_cfg_coeff_1_mod_value_f(u32 v) +{ + return (v & 0xffU) << 8U; +} +static inline u32 gr_ppcs_wwdx_map_table_cfg_coeff_2_mod_value_f(u32 v) +{ + return (v & 0xffU) << 16U; +} +static inline u32 gr_ppcs_wwdx_map_table_cfg_coeff_3_mod_value_f(u32 v) +{ + return (v & 0xffU) << 24U; +} +static inline u32 gr_bes_zrop_settings_r(void) +{ + return 0x00408850U; +} +static inline u32 gr_bes_zrop_settings_num_active_ltcs_f(u32 v) +{ + return (v & 0xfU) << 0U; +} +static inline u32 gr_be0_crop_debug3_r(void) +{ + return 0x00410108U; +} +static inline u32 gr_bes_crop_debug3_r(void) +{ + return 0x00408908U; +} +static inline u32 gr_bes_crop_debug3_comp_vdc_4to2_disable_m(void) +{ + return 0x1U << 31U; +} +static inline u32 gr_bes_crop_debug3_blendopt_read_suppress_m(void) +{ + return 0x1U << 1U; +} +static inline u32 gr_bes_crop_debug3_blendopt_read_suppress_disabled_f(void) +{ + return 0x0U; +} +static inline u32 gr_bes_crop_debug3_blendopt_read_suppress_enabled_f(void) +{ + return 0x2U; +} +static inline u32 gr_bes_crop_debug3_blendopt_fill_override_m(void) +{ + return 0x1U << 2U; +} +static inline u32 gr_bes_crop_debug3_blendopt_fill_override_disabled_f(void) +{ + return 0x0U; +} +static inline u32 gr_bes_crop_debug3_blendopt_fill_override_enabled_f(void) +{ + return 0x4U; +} +static inline u32 gr_bes_crop_settings_r(void) +{ + return 0x00408958U; +} +static inline u32 gr_bes_crop_settings_num_active_ltcs_f(u32 v) +{ + return (v & 0xfU) << 0U; +} +static inline u32 gr_zcull_bytes_per_aliquot_per_gpu_v(void) +{ + return 0x00000020U; +} +static inline u32 gr_zcull_save_restore_header_bytes_per_gpc_v(void) +{ + return 0x00000020U; +} 
+static inline u32 gr_zcull_save_restore_subregion_header_bytes_per_gpc_v(void) +{ + return 0x000000c0U; +} +static inline u32 gr_zcull_subregion_qty_v(void) +{ + return 0x00000010U; +} +static inline u32 gr_gpcs_tpcs_tex_in_dbg_r(void) +{ + return 0x00419a00U; +} +static inline u32 gr_gpcs_tpcs_tex_in_dbg_tsl1_rvch_invalidate_f(u32 v) +{ + return (v & 0x1U) << 19U; +} +static inline u32 gr_gpcs_tpcs_tex_in_dbg_tsl1_rvch_invalidate_m(void) +{ + return 0x1U << 19U; +} +static inline u32 gr_gpcs_tpcs_sm_l1tag_ctrl_r(void) +{ + return 0x00419bf0U; +} +static inline u32 gr_gpcs_tpcs_sm_l1tag_ctrl_cache_surface_ld_f(u32 v) +{ + return (v & 0x1U) << 5U; +} +static inline u32 gr_gpcs_tpcs_sm_l1tag_ctrl_cache_surface_ld_m(void) +{ + return 0x1U << 5U; +} +static inline u32 gr_gpcs_tpcs_sm_l1tag_ctrl_cache_surface_st_f(u32 v) +{ + return (v & 0x1U) << 10U; +} +static inline u32 gr_gpcs_tpcs_sm_l1tag_ctrl_cache_surface_st_m(void) +{ + return 0x1U << 10U; +} +static inline u32 gr_egpc0_etpc0_sm_dsm_perf_counter_control_sel0_r(void) +{ + return 0x00584200U; +} +static inline u32 gr_egpc0_etpc0_sm_dsm_perf_counter_control_sel1_r(void) +{ + return 0x00584204U; +} +static inline u32 gr_egpc0_etpc0_sm_dsm_perf_counter_control0_r(void) +{ + return 0x00584208U; +} +static inline u32 gr_egpc0_etpc0_sm_dsm_perf_counter_control1_r(void) +{ + return 0x00584210U; +} +static inline u32 gr_egpc0_etpc0_sm_dsm_perf_counter_control2_r(void) +{ + return 0x00584214U; +} +static inline u32 gr_egpc0_etpc0_sm_dsm_perf_counter_control3_r(void) +{ + return 0x00584218U; +} +static inline u32 gr_egpc0_etpc0_sm_dsm_perf_counter_control4_r(void) +{ + return 0x0058421cU; +} +static inline u32 gr_egpc0_etpc0_sm_dsm_perf_counter_control5_r(void) +{ + return 0x0058420cU; +} +static inline u32 gr_egpc0_etpc0_sm_dsm_perf_counter0_control_r(void) +{ + return 0x00584220U; +} +static inline u32 gr_egpc0_etpc0_sm_dsm_perf_counter1_control_r(void) +{ + return 0x00584224U; +} +static inline u32 
gr_egpc0_etpc0_sm_dsm_perf_counter2_control_r(void) +{ + return 0x00584228U; +} +static inline u32 gr_egpc0_etpc0_sm_dsm_perf_counter3_control_r(void) +{ + return 0x0058422cU; +} +static inline u32 gr_egpc0_etpc0_sm_dsm_perf_counter4_control_r(void) +{ + return 0x00584230U; +} +static inline u32 gr_egpc0_etpc0_sm_dsm_perf_counter5_control_r(void) +{ + return 0x00584234U; +} +static inline u32 gr_egpc0_etpc0_sm_dsm_perf_counter6_control_r(void) +{ + return 0x00584238U; +} +static inline u32 gr_egpc0_etpc0_sm_dsm_perf_counter7_control_r(void) +{ + return 0x0058423cU; +} +static inline u32 gr_egpc0_etpc0_sm0_dsm_perf_counter_status_s0_r(void) +{ + return 0x00584600U; +} +static inline u32 gr_egpc0_etpc0_sm0_dsm_perf_counter_status_s1_r(void) +{ + return 0x00584604U; +} +static inline u32 gr_egpc0_etpc0_sm0_dsm_perf_counter0_s0_r(void) +{ + return 0x00584624U; +} +static inline u32 gr_egpc0_etpc0_sm0_dsm_perf_counter1_s0_r(void) +{ + return 0x00584628U; +} +static inline u32 gr_egpc0_etpc0_sm0_dsm_perf_counter2_s0_r(void) +{ + return 0x0058462cU; +} +static inline u32 gr_egpc0_etpc0_sm0_dsm_perf_counter3_s0_r(void) +{ + return 0x00584630U; +} +static inline u32 gr_egpc0_etpc0_sm0_dsm_perf_counter0_s1_r(void) +{ + return 0x00584634U; +} +static inline u32 gr_egpc0_etpc0_sm0_dsm_perf_counter1_s1_r(void) +{ + return 0x00584638U; +} +static inline u32 gr_egpc0_etpc0_sm0_dsm_perf_counter2_s1_r(void) +{ + return 0x0058463cU; +} +static inline u32 gr_egpc0_etpc0_sm0_dsm_perf_counter3_s1_r(void) +{ + return 0x00584640U; +} +static inline u32 gr_egpc0_etpc0_sm0_dsm_perf_counter0_s2_r(void) +{ + return 0x00584644U; +} +static inline u32 gr_egpc0_etpc0_sm0_dsm_perf_counter1_s2_r(void) +{ + return 0x00584648U; +} +static inline u32 gr_egpc0_etpc0_sm0_dsm_perf_counter2_s2_r(void) +{ + return 0x0058464cU; +} +static inline u32 gr_egpc0_etpc0_sm0_dsm_perf_counter3_s2_r(void) +{ + return 0x00584650U; +} +static inline u32 gr_egpc0_etpc0_sm0_dsm_perf_counter0_s3_r(void) +{ + return 
0x00584654U; +} +static inline u32 gr_egpc0_etpc0_sm0_dsm_perf_counter1_s3_r(void) +{ + return 0x00584658U; +} +static inline u32 gr_egpc0_etpc0_sm0_dsm_perf_counter2_s3_r(void) +{ + return 0x0058465cU; +} +static inline u32 gr_egpc0_etpc0_sm0_dsm_perf_counter3_s3_r(void) +{ + return 0x00584660U; +} +static inline u32 gr_egpc0_etpc0_sm0_dsm_perf_counter4_r(void) +{ + return 0x00584614U; +} +static inline u32 gr_egpc0_etpc0_sm0_dsm_perf_counter5_r(void) +{ + return 0x00584618U; +} +static inline u32 gr_egpc0_etpc0_sm0_dsm_perf_counter6_r(void) +{ + return 0x0058461cU; +} +static inline u32 gr_egpc0_etpc0_sm0_dsm_perf_counter7_r(void) +{ + return 0x00584620U; +} +static inline u32 gr_fe_pwr_mode_r(void) +{ + return 0x00404170U; +} +static inline u32 gr_fe_pwr_mode_mode_auto_f(void) +{ + return 0x0U; +} +static inline u32 gr_fe_pwr_mode_mode_force_on_f(void) +{ + return 0x2U; +} +static inline u32 gr_fe_pwr_mode_req_v(u32 r) +{ + return (r >> 4U) & 0x1U; +} +static inline u32 gr_fe_pwr_mode_req_send_f(void) +{ + return 0x10U; +} +static inline u32 gr_fe_pwr_mode_req_done_v(void) +{ + return 0x00000000U; +} +static inline u32 gr_gpcs_pri_mmu_ctrl_r(void) +{ + return 0x00418880U; +} +static inline u32 gr_gpcs_pri_mmu_ctrl_vm_pg_size_m(void) +{ + return 0x1U << 0U; +} +static inline u32 gr_gpcs_pri_mmu_ctrl_use_pdb_big_page_size_m(void) +{ + return 0x1U << 11U; +} +static inline u32 gr_gpcs_pri_mmu_ctrl_vol_fault_m(void) +{ + return 0x1U << 1U; +} +static inline u32 gr_gpcs_pri_mmu_ctrl_comp_fault_m(void) +{ + return 0x1U << 2U; +} +static inline u32 gr_gpcs_pri_mmu_ctrl_miss_gran_m(void) +{ + return 0x3U << 3U; +} +static inline u32 gr_gpcs_pri_mmu_ctrl_cache_mode_m(void) +{ + return 0x3U << 5U; +} +static inline u32 gr_gpcs_pri_mmu_ctrl_mmu_aperture_m(void) +{ + return 0x3U << 28U; +} +static inline u32 gr_gpcs_pri_mmu_ctrl_mmu_vol_m(void) +{ + return 0x1U << 30U; +} +static inline u32 gr_gpcs_pri_mmu_ctrl_mmu_disable_m(void) +{ + return 0x1U << 31U; +} +static inline 
u32 gr_gpcs_pri_mmu_pm_unit_mask_r(void) +{ + return 0x00418890U; +} +static inline u32 gr_gpcs_pri_mmu_pm_req_mask_r(void) +{ + return 0x00418894U; +} +static inline u32 gr_gpcs_pri_mmu_debug_ctrl_r(void) +{ + return 0x004188b0U; +} +static inline u32 gr_gpcs_pri_mmu_debug_ctrl_debug_v(u32 r) +{ + return (r >> 16U) & 0x1U; +} +static inline u32 gr_gpcs_pri_mmu_debug_ctrl_debug_enabled_v(void) +{ + return 0x00000001U; +} +static inline u32 gr_gpcs_pri_mmu_debug_wr_r(void) +{ + return 0x004188b4U; +} +static inline u32 gr_gpcs_pri_mmu_debug_rd_r(void) +{ + return 0x004188b8U; +} +static inline u32 gr_gpcs_mmu_num_active_ltcs_r(void) +{ + return 0x004188acU; +} +static inline u32 gr_gpcs_tpcs_sms_dbgr_control0_r(void) +{ + return 0x00419e84U; +} +static inline u32 gr_fe_gfxp_wfi_timeout_r(void) +{ + return 0x004041c0U; +} +static inline u32 gr_fe_gfxp_wfi_timeout_count_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 gr_fe_gfxp_wfi_timeout_count_disabled_f(void) +{ + return 0x0U; +} +static inline u32 gr_gpcs_tpcs_sm_texio_control_r(void) +{ + return 0x00419bd8U; +} +static inline u32 gr_gpcs_tpcs_sm_texio_control_oor_addr_check_mode_f(u32 v) +{ + return (v & 0x7U) << 8U; +} +static inline u32 gr_gpcs_tpcs_sm_texio_control_oor_addr_check_mode_m(void) +{ + return 0x7U << 8U; +} +static inline u32 gr_gpcs_tpcs_sm_texio_control_oor_addr_check_mode_arm_63_48_match_f(void) +{ + return 0x100U; +} +static inline u32 gr_gpcs_tpcs_sm_disp_ctrl_r(void) +{ + return 0x00419ba4U; +} +static inline u32 gr_gpcs_tpcs_sm_disp_ctrl_re_suppress_m(void) +{ + return 0x3U << 11U; +} +static inline u32 gr_gpcs_tpcs_sm_disp_ctrl_re_suppress_disable_f(void) +{ + return 0x1000U; +} +static inline u32 gr_gpcs_tc_debug0_r(void) +{ + return 0x00418708U; +} +static inline u32 gr_gpcs_tc_debug0_limit_coalesce_buffer_size_f(u32 v) +{ + return (v & 0x1ffU) << 0U; +} +static inline u32 gr_gpcs_tc_debug0_limit_coalesce_buffer_size_m(void) +{ + return 0x1ffU << 0U; +} +static inline 
u32 gr_gpc0_mmu_gpcmmu_global_esr_r(void) +{ + return 0x00500324U; +} +static inline u32 gr_gpc0_mmu_gpcmmu_global_esr_ecc_corrected_f(u32 v) +{ + return (v & 0x1U) << 0U; +} +static inline u32 gr_gpc0_mmu_gpcmmu_global_esr_ecc_corrected_m(void) +{ + return 0x1U << 0U; +} +static inline u32 gr_gpc0_mmu_gpcmmu_global_esr_ecc_uncorrected_f(u32 v) +{ + return (v & 0x1U) << 1U; +} +static inline u32 gr_gpc0_mmu_gpcmmu_global_esr_ecc_uncorrected_m(void) +{ + return 0x1U << 1U; +} +static inline u32 gr_gpc0_mmu_l1tlb_ecc_status_r(void) +{ + return 0x00500314U; +} +static inline u32 gr_gpc0_mmu_l1tlb_ecc_status_corrected_err_l1tlb_sa_data_f(u32 v) +{ + return (v & 0x1U) << 0U; +} +static inline u32 gr_gpc0_mmu_l1tlb_ecc_status_corrected_err_l1tlb_sa_data_m(void) +{ + return 0x1U << 0U; +} +static inline u32 gr_gpc0_mmu_l1tlb_ecc_status_corrected_err_l1tlb_fa_data_f(u32 v) +{ + return (v & 0x1U) << 2U; +} +static inline u32 gr_gpc0_mmu_l1tlb_ecc_status_corrected_err_l1tlb_fa_data_m(void) +{ + return 0x1U << 2U; +} +static inline u32 gr_gpc0_mmu_l1tlb_ecc_status_uncorrected_err_l1tlb_sa_data_f(u32 v) +{ + return (v & 0x1U) << 1U; +} +static inline u32 gr_gpc0_mmu_l1tlb_ecc_status_uncorrected_err_l1tlb_sa_data_m(void) +{ + return 0x1U << 1U; +} +static inline u32 gr_gpc0_mmu_l1tlb_ecc_status_uncorrected_err_l1tlb_fa_data_f(u32 v) +{ + return (v & 0x1U) << 3U; +} +static inline u32 gr_gpc0_mmu_l1tlb_ecc_status_uncorrected_err_l1tlb_fa_data_m(void) +{ + return 0x1U << 3U; +} +static inline u32 gr_gpc0_mmu_l1tlb_ecc_status_uncorrected_err_total_counter_overflow_f(u32 v) +{ + return (v & 0x1U) << 18U; +} +static inline u32 gr_gpc0_mmu_l1tlb_ecc_status_uncorrected_err_total_counter_overflow_m(void) +{ + return 0x1U << 18U; +} +static inline u32 gr_gpc0_mmu_l1tlb_ecc_status_corrected_err_total_counter_overflow_f(u32 v) +{ + return (v & 0x1U) << 16U; +} +static inline u32 gr_gpc0_mmu_l1tlb_ecc_status_corrected_err_total_counter_overflow_m(void) +{ + return 0x1U << 16U; +} +static 
inline u32 gr_gpc0_mmu_l1tlb_ecc_status_uncorrected_err_unique_counter_overflow_f(u32 v) +{ + return (v & 0x1U) << 19U; +} +static inline u32 gr_gpc0_mmu_l1tlb_ecc_status_uncorrected_err_unique_counter_overflow_m(void) +{ + return 0x1U << 19U; +} +static inline u32 gr_gpc0_mmu_l1tlb_ecc_status_corrected_err_unique_counter_overflow_f(u32 v) +{ + return (v & 0x1U) << 17U; +} +static inline u32 gr_gpc0_mmu_l1tlb_ecc_status_corrected_err_unique_counter_overflow_m(void) +{ + return 0x1U << 17U; +} +static inline u32 gr_gpc0_mmu_l1tlb_ecc_status_reset_f(u32 v) +{ + return (v & 0x1U) << 30U; +} +static inline u32 gr_gpc0_mmu_l1tlb_ecc_status_reset_task_f(void) +{ + return 0x40000000U; +} +static inline u32 gr_gpc0_mmu_l1tlb_ecc_address_r(void) +{ + return 0x00500320U; +} +static inline u32 gr_gpc0_mmu_l1tlb_ecc_address_index_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 gr_gpc0_mmu_l1tlb_ecc_corrected_err_count_r(void) +{ + return 0x00500318U; +} +static inline u32 gr_gpc0_mmu_l1tlb_ecc_corrected_err_count_total_s(void) +{ + return 16U; +} +static inline u32 gr_gpc0_mmu_l1tlb_ecc_corrected_err_count_total_f(u32 v) +{ + return (v & 0xffffU) << 0U; +} +static inline u32 gr_gpc0_mmu_l1tlb_ecc_corrected_err_count_total_m(void) +{ + return 0xffffU << 0U; +} +static inline u32 gr_gpc0_mmu_l1tlb_ecc_corrected_err_count_total_v(u32 r) +{ + return (r >> 0U) & 0xffffU; +} +static inline u32 gr_gpc0_mmu_l1tlb_ecc_corrected_err_count_unique_total_s(void) +{ + return 16U; +} +static inline u32 gr_gpc0_mmu_l1tlb_ecc_corrected_err_count_unique_total_f(u32 v) +{ + return (v & 0xffffU) << 16U; +} +static inline u32 gr_gpc0_mmu_l1tlb_ecc_corrected_err_count_unique_total_m(void) +{ + return 0xffffU << 16U; +} +static inline u32 gr_gpc0_mmu_l1tlb_ecc_corrected_err_count_unique_total_v(u32 r) +{ + return (r >> 16U) & 0xffffU; +} +static inline u32 gr_gpc0_mmu_l1tlb_ecc_uncorrected_err_count_r(void) +{ + return 0x0050031cU; +} +static inline u32 
gr_gpc0_mmu_l1tlb_ecc_uncorrected_err_count_total_s(void) +{ + return 16U; +} +static inline u32 gr_gpc0_mmu_l1tlb_ecc_uncorrected_err_count_total_f(u32 v) +{ + return (v & 0xffffU) << 0U; +} +static inline u32 gr_gpc0_mmu_l1tlb_ecc_uncorrected_err_count_total_m(void) +{ + return 0xffffU << 0U; +} +static inline u32 gr_gpc0_mmu_l1tlb_ecc_uncorrected_err_count_total_v(u32 r) +{ + return (r >> 0U) & 0xffffU; +} +static inline u32 gr_gpc0_mmu_l1tlb_ecc_uncorrected_err_count_unique_total_s(void) +{ + return 16U; +} +static inline u32 gr_gpc0_mmu_l1tlb_ecc_uncorrected_err_count_unique_total_f(u32 v) +{ + return (v & 0xffffU) << 16U; +} +static inline u32 gr_gpc0_mmu_l1tlb_ecc_uncorrected_err_count_unique_total_m(void) +{ + return 0xffffU << 16U; +} +static inline u32 gr_gpc0_mmu_l1tlb_ecc_uncorrected_err_count_unique_total_v(u32 r) +{ + return (r >> 16U) & 0xffffU; +} +static inline u32 gr_gpc0_gpccs_hww_esr_r(void) +{ + return 0x00502c98U; +} +static inline u32 gr_gpc0_gpccs_hww_esr_ecc_corrected_f(u32 v) +{ + return (v & 0x1U) << 0U; +} +static inline u32 gr_gpc0_gpccs_hww_esr_ecc_corrected_m(void) +{ + return 0x1U << 0U; +} +static inline u32 gr_gpc0_gpccs_hww_esr_ecc_corrected_pending_f(void) +{ + return 0x1U; +} +static inline u32 gr_gpc0_gpccs_hww_esr_ecc_uncorrected_f(u32 v) +{ + return (v & 0x1U) << 1U; +} +static inline u32 gr_gpc0_gpccs_hww_esr_ecc_uncorrected_m(void) +{ + return 0x1U << 1U; +} +static inline u32 gr_gpc0_gpccs_hww_esr_ecc_uncorrected_pending_f(void) +{ + return 0x2U; +} +static inline u32 gr_gpc0_gpccs_falcon_ecc_status_r(void) +{ + return 0x00502678U; +} +static inline u32 gr_gpc0_gpccs_falcon_ecc_status_corrected_err_imem_f(u32 v) +{ + return (v & 0x1U) << 0U; +} +static inline u32 gr_gpc0_gpccs_falcon_ecc_status_corrected_err_imem_m(void) +{ + return 0x1U << 0U; +} +static inline u32 gr_gpc0_gpccs_falcon_ecc_status_corrected_err_imem_pending_f(void) +{ + return 0x1U; +} +static inline u32 
gr_gpc0_gpccs_falcon_ecc_status_corrected_err_dmem_f(u32 v) +{ + return (v & 0x1U) << 1U; +} +static inline u32 gr_gpc0_gpccs_falcon_ecc_status_corrected_err_dmem_m(void) +{ + return 0x1U << 1U; +} +static inline u32 gr_gpc0_gpccs_falcon_ecc_status_corrected_err_dmem_pending_f(void) +{ + return 0x2U; +} +static inline u32 gr_gpc0_gpccs_falcon_ecc_status_uncorrected_err_imem_f(u32 v) +{ + return (v & 0x1U) << 4U; +} +static inline u32 gr_gpc0_gpccs_falcon_ecc_status_uncorrected_err_imem_m(void) +{ + return 0x1U << 4U; +} +static inline u32 gr_gpc0_gpccs_falcon_ecc_status_uncorrected_err_imem_pending_f(void) +{ + return 0x10U; +} +static inline u32 gr_gpc0_gpccs_falcon_ecc_status_uncorrected_err_dmem_f(u32 v) +{ + return (v & 0x1U) << 5U; +} +static inline u32 gr_gpc0_gpccs_falcon_ecc_status_uncorrected_err_dmem_m(void) +{ + return 0x1U << 5U; +} +static inline u32 gr_gpc0_gpccs_falcon_ecc_status_uncorrected_err_dmem_pending_f(void) +{ + return 0x20U; +} +static inline u32 gr_gpc0_gpccs_falcon_ecc_status_uncorrected_err_total_counter_overflow_f(u32 v) +{ + return (v & 0x1U) << 10U; +} +static inline u32 gr_gpc0_gpccs_falcon_ecc_status_uncorrected_err_total_counter_overflow_m(void) +{ + return 0x1U << 10U; +} +static inline u32 gr_gpc0_gpccs_falcon_ecc_status_uncorrected_err_total_counter_overflow_pending_f(void) +{ + return 0x400U; +} +static inline u32 gr_gpc0_gpccs_falcon_ecc_status_corrected_err_total_counter_overflow_f(u32 v) +{ + return (v & 0x1U) << 8U; +} +static inline u32 gr_gpc0_gpccs_falcon_ecc_status_corrected_err_total_counter_overflow_m(void) +{ + return 0x1U << 8U; +} +static inline u32 gr_gpc0_gpccs_falcon_ecc_status_corrected_err_total_counter_overflow_pending_f(void) +{ + return 0x100U; +} +static inline u32 gr_gpc0_gpccs_falcon_ecc_status_uncorrected_err_unique_counter_overflow_f(u32 v) +{ + return (v & 0x1U) << 11U; +} +static inline u32 gr_gpc0_gpccs_falcon_ecc_status_uncorrected_err_unique_counter_overflow_m(void) +{ + return 0x1U << 11U; +} 
+static inline u32 gr_gpc0_gpccs_falcon_ecc_status_uncorrected_err_unique_counter_overflow_pending_f(void) +{ + return 0x800U; +} +static inline u32 gr_gpc0_gpccs_falcon_ecc_status_corrected_err_unique_counter_overflow_f(u32 v) +{ + return (v & 0x1U) << 9U; +} +static inline u32 gr_gpc0_gpccs_falcon_ecc_status_corrected_err_unique_counter_overflow_m(void) +{ + return 0x1U << 9U; +} +static inline u32 gr_gpc0_gpccs_falcon_ecc_status_corrected_err_unique_counter_overflow_pending_f(void) +{ + return 0x200U; +} +static inline u32 gr_gpc0_gpccs_falcon_ecc_status_reset_f(u32 v) +{ + return (v & 0x1U) << 31U; +} +static inline u32 gr_gpc0_gpccs_falcon_ecc_status_reset_task_f(void) +{ + return 0x80000000U; +} +static inline u32 gr_gpc0_gpccs_falcon_ecc_address_r(void) +{ + return 0x00502684U; +} +static inline u32 gr_gpc0_gpccs_falcon_ecc_address_index_f(u32 v) +{ + return (v & 0x7fffffU) << 0U; +} +static inline u32 gr_gpc0_gpccs_falcon_ecc_address_row_address_s(void) +{ + return 20U; +} +static inline u32 gr_gpc0_gpccs_falcon_ecc_address_row_address_f(u32 v) +{ + return (v & 0xfffffU) << 0U; +} +static inline u32 gr_gpc0_gpccs_falcon_ecc_address_row_address_m(void) +{ + return 0xfffffU << 0U; +} +static inline u32 gr_gpc0_gpccs_falcon_ecc_address_row_address_v(u32 r) +{ + return (r >> 0U) & 0xfffffU; +} +static inline u32 gr_gpc0_gpccs_falcon_ecc_corrected_err_count_r(void) +{ + return 0x0050267cU; +} +static inline u32 gr_gpc0_gpccs_falcon_ecc_corrected_err_count_total_s(void) +{ + return 16U; +} +static inline u32 gr_gpc0_gpccs_falcon_ecc_corrected_err_count_total_f(u32 v) +{ + return (v & 0xffffU) << 0U; +} +static inline u32 gr_gpc0_gpccs_falcon_ecc_corrected_err_count_total_m(void) +{ + return 0xffffU << 0U; +} +static inline u32 gr_gpc0_gpccs_falcon_ecc_corrected_err_count_total_v(u32 r) +{ + return (r >> 0U) & 0xffffU; +} +static inline u32 gr_gpc0_gpccs_falcon_ecc_corrected_err_count_unique_total_s(void) +{ + return 16U; +} +static inline u32 
gr_gpc0_gpccs_falcon_ecc_corrected_err_count_unique_total_f(u32 v) +{ + return (v & 0xffffU) << 16U; +} +static inline u32 gr_gpc0_gpccs_falcon_ecc_corrected_err_count_unique_total_m(void) +{ + return 0xffffU << 16U; +} +static inline u32 gr_gpc0_gpccs_falcon_ecc_corrected_err_count_unique_total_v(u32 r) +{ + return (r >> 16U) & 0xffffU; +} +static inline u32 gr_gpc0_gpccs_falcon_ecc_uncorrected_err_count_r(void) +{ + return 0x00502680U; +} +static inline u32 gr_gpc0_gpccs_falcon_ecc_uncorrected_err_count_total_f(u32 v) +{ + return (v & 0xffffU) << 0U; +} +static inline u32 gr_gpc0_gpccs_falcon_ecc_uncorrected_err_count_total_m(void) +{ + return 0xffffU << 0U; +} +static inline u32 gr_gpc0_gpccs_falcon_ecc_uncorrected_err_count_total_v(u32 r) +{ + return (r >> 0U) & 0xffffU; +} +static inline u32 gr_gpc0_gpccs_falcon_ecc_uncorrected_err_count_unique_total_s(void) +{ + return 16U; +} +static inline u32 gr_gpc0_gpccs_falcon_ecc_uncorrected_err_count_unique_total_f(u32 v) +{ + return (v & 0xffffU) << 16U; +} +static inline u32 gr_gpc0_gpccs_falcon_ecc_uncorrected_err_count_unique_total_m(void) +{ + return 0xffffU << 16U; +} +static inline u32 gr_gpc0_gpccs_falcon_ecc_uncorrected_err_count_unique_total_v(u32 r) +{ + return (r >> 16U) & 0xffffU; +} +static inline u32 gr_fecs_falcon_ecc_status_r(void) +{ + return 0x00409678U; +} +static inline u32 gr_fecs_falcon_ecc_status_corrected_err_imem_f(u32 v) +{ + return (v & 0x1U) << 0U; +} +static inline u32 gr_fecs_falcon_ecc_status_corrected_err_imem_m(void) +{ + return 0x1U << 0U; +} +static inline u32 gr_fecs_falcon_ecc_status_corrected_err_imem_pending_f(void) +{ + return 0x1U; +} +static inline u32 gr_fecs_falcon_ecc_status_corrected_err_dmem_f(u32 v) +{ + return (v & 0x1U) << 1U; +} +static inline u32 gr_fecs_falcon_ecc_status_corrected_err_dmem_m(void) +{ + return 0x1U << 1U; +} +static inline u32 gr_fecs_falcon_ecc_status_corrected_err_dmem_pending_f(void) +{ + return 0x2U; +} +static inline u32 
gr_fecs_falcon_ecc_status_uncorrected_err_imem_f(u32 v) +{ + return (v & 0x1U) << 4U; +} +static inline u32 gr_fecs_falcon_ecc_status_uncorrected_err_imem_m(void) +{ + return 0x1U << 4U; +} +static inline u32 gr_fecs_falcon_ecc_status_uncorrected_err_imem_pending_f(void) +{ + return 0x10U; +} +static inline u32 gr_fecs_falcon_ecc_status_uncorrected_err_dmem_f(u32 v) +{ + return (v & 0x1U) << 5U; +} +static inline u32 gr_fecs_falcon_ecc_status_uncorrected_err_dmem_m(void) +{ + return 0x1U << 5U; +} +static inline u32 gr_fecs_falcon_ecc_status_uncorrected_err_dmem_pending_f(void) +{ + return 0x20U; +} +static inline u32 gr_fecs_falcon_ecc_status_uncorrected_err_total_counter_overflow_f(u32 v) +{ + return (v & 0x1U) << 10U; +} +static inline u32 gr_fecs_falcon_ecc_status_uncorrected_err_total_counter_overflow_m(void) +{ + return 0x1U << 10U; +} +static inline u32 gr_fecs_falcon_ecc_status_uncorrected_err_total_counter_overflow_pending_f(void) +{ + return 0x400U; +} +static inline u32 gr_fecs_falcon_ecc_status_corrected_err_total_counter_overflow_f(u32 v) +{ + return (v & 0x1U) << 8U; +} +static inline u32 gr_fecs_falcon_ecc_status_corrected_err_total_counter_overflow_m(void) +{ + return 0x1U << 8U; +} +static inline u32 gr_fecs_falcon_ecc_status_corrected_err_total_counter_overflow_pending_f(void) +{ + return 0x100U; +} +static inline u32 gr_fecs_falcon_ecc_status_uncorrected_err_unique_counter_overflow_f(u32 v) +{ + return (v & 0x1U) << 11U; +} +static inline u32 gr_fecs_falcon_ecc_status_uncorrected_err_unique_counter_overflow_m(void) +{ + return 0x1U << 11U; +} +static inline u32 gr_fecs_falcon_ecc_status_uncorrected_err_unique_counter_overflow_pending_f(void) +{ + return 0x800U; +} +static inline u32 gr_fecs_falcon_ecc_status_corrected_err_unique_counter_overflow_f(u32 v) +{ + return (v & 0x1U) << 9U; +} +static inline u32 gr_fecs_falcon_ecc_status_corrected_err_unique_counter_overflow_m(void) +{ + return 0x1U << 9U; +} +static inline u32 
gr_fecs_falcon_ecc_status_corrected_err_unique_counter_overflow_pending_f(void) +{ + return 0x200U; +} +static inline u32 gr_fecs_falcon_ecc_status_reset_f(u32 v) +{ + return (v & 0x1U) << 31U; +} +static inline u32 gr_fecs_falcon_ecc_status_reset_task_f(void) +{ + return 0x80000000U; +} +static inline u32 gr_fecs_falcon_ecc_address_r(void) +{ + return 0x00409684U; +} +static inline u32 gr_fecs_falcon_ecc_address_index_f(u32 v) +{ + return (v & 0x7fffffU) << 0U; +} +static inline u32 gr_fecs_falcon_ecc_address_row_address_s(void) +{ + return 20U; +} +static inline u32 gr_fecs_falcon_ecc_address_row_address_f(u32 v) +{ + return (v & 0xfffffU) << 0U; +} +static inline u32 gr_fecs_falcon_ecc_address_row_address_m(void) +{ + return 0xfffffU << 0U; +} +static inline u32 gr_fecs_falcon_ecc_address_row_address_v(u32 r) +{ + return (r >> 0U) & 0xfffffU; +} +static inline u32 gr_fecs_falcon_ecc_corrected_err_count_r(void) +{ + return 0x0040967cU; +} +static inline u32 gr_fecs_falcon_ecc_corrected_err_count_total_s(void) +{ + return 16U; +} +static inline u32 gr_fecs_falcon_ecc_corrected_err_count_total_f(u32 v) +{ + return (v & 0xffffU) << 0U; +} +static inline u32 gr_fecs_falcon_ecc_corrected_err_count_total_m(void) +{ + return 0xffffU << 0U; +} +static inline u32 gr_fecs_falcon_ecc_corrected_err_count_total_v(u32 r) +{ + return (r >> 0U) & 0xffffU; +} +static inline u32 gr_fecs_falcon_ecc_corrected_err_count_unique_total_s(void) +{ + return 16U; +} +static inline u32 gr_fecs_falcon_ecc_corrected_err_count_unique_total_f(u32 v) +{ + return (v & 0xffffU) << 16U; +} +static inline u32 gr_fecs_falcon_ecc_corrected_err_count_unique_total_m(void) +{ + return 0xffffU << 16U; +} +static inline u32 gr_fecs_falcon_ecc_corrected_err_count_unique_total_v(u32 r) +{ + return (r >> 16U) & 0xffffU; +} +static inline u32 gr_fecs_falcon_ecc_uncorrected_err_count_r(void) +{ + return 0x00409680U; +} +static inline u32 gr_fecs_falcon_ecc_uncorrected_err_count_total_f(u32 v) +{ + return (v & 
0xffffU) << 0U; +} +static inline u32 gr_fecs_falcon_ecc_uncorrected_err_count_total_m(void) +{ + return 0xffffU << 0U; +} +static inline u32 gr_fecs_falcon_ecc_uncorrected_err_count_total_v(u32 r) +{ + return (r >> 0U) & 0xffffU; +} +static inline u32 gr_fecs_falcon_ecc_uncorrected_err_count_unique_total_s(void) +{ + return 16U; +} +static inline u32 gr_fecs_falcon_ecc_uncorrected_err_count_unique_total_f(u32 v) +{ + return (v & 0xffffU) << 16U; +} +static inline u32 gr_fecs_falcon_ecc_uncorrected_err_count_unique_total_m(void) +{ + return 0xffffU << 16U; +} +static inline u32 gr_fecs_falcon_ecc_uncorrected_err_count_unique_total_v(u32 r) +{ + return (r >> 16U) & 0xffffU; +} +#endif diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_ltc_gv11b.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_ltc_gv11b.h new file mode 100644 index 000000000..769bcf0c3 --- /dev/null +++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_ltc_gv11b.h @@ -0,0 +1,803 @@ +/* + * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/* + * Function naming determines intended use: + * + * _r(void) : Returns the offset for register . + * + * _o(void) : Returns the offset for element . + * + * _w(void) : Returns the word offset for word (4 byte) element . + * + * __s(void) : Returns size of field of register in bits. + * + * __f(u32 v) : Returns a value based on 'v' which has been shifted + * and masked to place it at field of register . This value + * can be |'d with others to produce a full register value for + * register . + * + * __m(void) : Returns a mask for field of register . This + * value can be ~'d and then &'d to clear the value of field for + * register . + * + * ___f(void) : Returns the constant value after being shifted + * to place it at field of register . This value can be |'d + * with others to produce a full register value for . + * + * __v(u32 r) : Returns the value of field from a full register + * value 'r' after being shifted to place its LSB at bit 0. + * This value is suitable for direct comparison with other unshifted + * values appropriate for use in field of register . + * + * ___v(void) : Returns the constant value for defined for + * field of register . This value is suitable for direct + * comparison with unshifted values appropriate for use in field + * of register . 
+ */ +#ifndef _hw_ltc_gv11b_h_ +#define _hw_ltc_gv11b_h_ + +static inline u32 ltc_pltcg_base_v(void) +{ + return 0x00140000U; +} +static inline u32 ltc_pltcg_extent_v(void) +{ + return 0x0017ffffU; +} +static inline u32 ltc_ltc0_ltss_v(void) +{ + return 0x00140200U; +} +static inline u32 ltc_ltc0_lts0_v(void) +{ + return 0x00140400U; +} +static inline u32 ltc_ltcs_ltss_v(void) +{ + return 0x0017e200U; +} +static inline u32 ltc_ltcs_lts0_cbc_ctrl1_r(void) +{ + return 0x0014046cU; +} +static inline u32 ltc_ltc0_lts0_dstg_cfg0_r(void) +{ + return 0x00140518U; +} +static inline u32 ltc_ltcs_ltss_dstg_cfg0_r(void) +{ + return 0x0017e318U; +} +static inline u32 ltc_ltcs_ltss_dstg_cfg0_vdc_4to2_disable_m(void) +{ + return 0x1U << 15U; +} +static inline u32 ltc_ltc0_lts0_tstg_cfg1_r(void) +{ + return 0x00140494U; +} +static inline u32 ltc_ltc0_lts0_tstg_cfg1_active_ways_v(u32 r) +{ + return (r >> 0U) & 0xffffU; +} +static inline u32 ltc_ltc0_lts0_tstg_cfg1_active_sets_v(u32 r) +{ + return (r >> 16U) & 0x3U; +} +static inline u32 ltc_ltc0_lts0_tstg_cfg1_active_sets_all_v(void) +{ + return 0x00000000U; +} +static inline u32 ltc_ltc0_lts0_tstg_cfg1_active_sets_half_v(void) +{ + return 0x00000001U; +} +static inline u32 ltc_ltc0_lts0_tstg_cfg1_active_sets_quarter_v(void) +{ + return 0x00000002U; +} +static inline u32 ltc_ltcs_ltss_cbc_ctrl1_r(void) +{ + return 0x0017e26cU; +} +static inline u32 ltc_ltcs_ltss_cbc_ctrl1_clean_active_f(void) +{ + return 0x1U; +} +static inline u32 ltc_ltcs_ltss_cbc_ctrl1_invalidate_active_f(void) +{ + return 0x2U; +} +static inline u32 ltc_ltcs_ltss_cbc_ctrl1_clear_v(u32 r) +{ + return (r >> 2U) & 0x1U; +} +static inline u32 ltc_ltcs_ltss_cbc_ctrl1_clear_active_v(void) +{ + return 0x00000001U; +} +static inline u32 ltc_ltcs_ltss_cbc_ctrl1_clear_active_f(void) +{ + return 0x4U; +} +static inline u32 ltc_ltc0_lts0_cbc_ctrl1_r(void) +{ + return 0x0014046cU; +} +static inline u32 ltc_ltcs_ltss_cbc_ctrl2_r(void) +{ + return 0x0017e270U; +} +static 
inline u32 ltc_ltcs_ltss_cbc_ctrl2_clear_lower_bound_f(u32 v) +{ + return (v & 0x3ffffU) << 0U; +} +static inline u32 ltc_ltcs_ltss_cbc_ctrl3_r(void) +{ + return 0x0017e274U; +} +static inline u32 ltc_ltcs_ltss_cbc_ctrl3_clear_upper_bound_f(u32 v) +{ + return (v & 0x3ffffU) << 0U; +} +static inline u32 ltc_ltcs_ltss_cbc_ctrl3_clear_upper_bound_init_v(void) +{ + return 0x0003ffffU; +} +static inline u32 ltc_ltcs_ltss_cbc_base_r(void) +{ + return 0x0017e278U; +} +static inline u32 ltc_ltcs_ltss_cbc_base_alignment_shift_v(void) +{ + return 0x0000000bU; +} +static inline u32 ltc_ltcs_ltss_cbc_base_address_v(u32 r) +{ + return (r >> 0U) & 0x3ffffffU; +} +static inline u32 ltc_ltcs_ltss_cbc_num_active_ltcs_r(void) +{ + return 0x0017e27cU; +} +static inline u32 ltc_ltcs_ltss_cbc_num_active_ltcs__v(u32 r) +{ + return (r >> 0U) & 0x1fU; +} +static inline u32 ltc_ltcs_ltss_cbc_num_active_ltcs_nvlink_peer_through_l2_f(u32 v) +{ + return (v & 0x1U) << 24U; +} +static inline u32 ltc_ltcs_ltss_cbc_num_active_ltcs_nvlink_peer_through_l2_v(u32 r) +{ + return (r >> 24U) & 0x1U; +} +static inline u32 ltc_ltcs_ltss_cbc_num_active_ltcs_serialize_f(u32 v) +{ + return (v & 0x1U) << 25U; +} +static inline u32 ltc_ltcs_ltss_cbc_num_active_ltcs_serialize_v(u32 r) +{ + return (r >> 25U) & 0x1U; +} +static inline u32 ltc_ltcs_misc_ltc_num_active_ltcs_r(void) +{ + return 0x0017e000U; +} +static inline u32 ltc_ltcs_ltss_cbc_param_r(void) +{ + return 0x0017e280U; +} +static inline u32 ltc_ltcs_ltss_cbc_param_comptags_per_cache_line_v(u32 r) +{ + return (r >> 0U) & 0xffffU; +} +static inline u32 ltc_ltcs_ltss_cbc_param_cache_line_size_v(u32 r) +{ + return (r >> 24U) & 0xfU; +} +static inline u32 ltc_ltcs_ltss_cbc_param_slices_per_ltc_v(u32 r) +{ + return (r >> 28U) & 0xfU; +} +static inline u32 ltc_ltcs_ltss_cbc_param2_r(void) +{ + return 0x0017e3f4U; +} +static inline u32 ltc_ltcs_ltss_cbc_param2_gobs_per_comptagline_per_slice_v(u32 r) +{ + return (r >> 0U) & 0xffffU; +} +static inline u32 
ltc_ltcs_ltss_tstg_set_mgmt_r(void) +{ + return 0x0017e2acU; +} +static inline u32 ltc_ltcs_ltss_tstg_set_mgmt_max_ways_evict_last_f(u32 v) +{ + return (v & 0x1fU) << 16U; +} +static inline u32 ltc_ltcs_ltss_dstg_zbc_index_r(void) +{ + return 0x0017e338U; +} +static inline u32 ltc_ltcs_ltss_dstg_zbc_index_address_f(u32 v) +{ + return (v & 0xfU) << 0U; +} +static inline u32 ltc_ltcs_ltss_dstg_zbc_color_clear_value_r(u32 i) +{ + return 0x0017e33cU + i*4U; +} +static inline u32 ltc_ltcs_ltss_dstg_zbc_color_clear_value__size_1_v(void) +{ + return 0x00000004U; +} +static inline u32 ltc_ltcs_ltss_dstg_zbc_depth_clear_value_r(void) +{ + return 0x0017e34cU; +} +static inline u32 ltc_ltcs_ltss_dstg_zbc_depth_clear_value_field_s(void) +{ + return 32U; +} +static inline u32 ltc_ltcs_ltss_dstg_zbc_depth_clear_value_field_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 ltc_ltcs_ltss_dstg_zbc_depth_clear_value_field_m(void) +{ + return 0xffffffffU << 0U; +} +static inline u32 ltc_ltcs_ltss_dstg_zbc_depth_clear_value_field_v(u32 r) +{ + return (r >> 0U) & 0xffffffffU; +} +static inline u32 ltc_ltcs_ltss_dstg_zbc_stencil_clear_value_r(void) +{ + return 0x0017e204U; +} +static inline u32 ltc_ltcs_ltss_dstg_zbc_stencil_clear_value_field_s(void) +{ + return 8U; +} +static inline u32 ltc_ltcs_ltss_dstg_zbc_stencil_clear_value_field_f(u32 v) +{ + return (v & 0xffU) << 0U; +} +static inline u32 ltc_ltcs_ltss_dstg_zbc_stencil_clear_value_field_m(void) +{ + return 0xffU << 0U; +} +static inline u32 ltc_ltcs_ltss_dstg_zbc_stencil_clear_value_field_v(u32 r) +{ + return (r >> 0U) & 0xffU; +} +static inline u32 ltc_ltcs_ltss_tstg_set_mgmt_2_r(void) +{ + return 0x0017e2b0U; +} +static inline u32 ltc_ltcs_ltss_tstg_set_mgmt_2_l2_bypass_mode_enabled_f(void) +{ + return 0x10000000U; +} +static inline u32 ltc_ltcs_ltss_g_elpg_r(void) +{ + return 0x0017e214U; +} +static inline u32 ltc_ltcs_ltss_g_elpg_flush_v(u32 r) +{ + return (r >> 0U) & 0x1U; +} +static inline u32 
ltc_ltcs_ltss_g_elpg_flush_pending_v(void) +{ + return 0x00000001U; +} +static inline u32 ltc_ltcs_ltss_g_elpg_flush_pending_f(void) +{ + return 0x1U; +} +static inline u32 ltc_ltc0_ltss_g_elpg_r(void) +{ + return 0x00140214U; +} +static inline u32 ltc_ltc0_ltss_g_elpg_flush_v(u32 r) +{ + return (r >> 0U) & 0x1U; +} +static inline u32 ltc_ltc0_ltss_g_elpg_flush_pending_v(void) +{ + return 0x00000001U; +} +static inline u32 ltc_ltc0_ltss_g_elpg_flush_pending_f(void) +{ + return 0x1U; +} +static inline u32 ltc_ltc1_ltss_g_elpg_r(void) +{ + return 0x00142214U; +} +static inline u32 ltc_ltc1_ltss_g_elpg_flush_v(u32 r) +{ + return (r >> 0U) & 0x1U; +} +static inline u32 ltc_ltc1_ltss_g_elpg_flush_pending_v(void) +{ + return 0x00000001U; +} +static inline u32 ltc_ltc1_ltss_g_elpg_flush_pending_f(void) +{ + return 0x1U; +} +static inline u32 ltc_ltcs_ltss_intr_r(void) +{ + return 0x0017e20cU; +} +static inline u32 ltc_ltcs_ltss_intr_ecc_sec_error_pending_f(void) +{ + return 0x100U; +} +static inline u32 ltc_ltcs_ltss_intr_ecc_ded_error_pending_f(void) +{ + return 0x200U; +} +static inline u32 ltc_ltcs_ltss_intr_en_evicted_cb_m(void) +{ + return 0x1U << 20U; +} +static inline u32 ltc_ltcs_ltss_intr_en_illegal_compstat_access_m(void) +{ + return 0x1U << 30U; +} +static inline u32 ltc_ltcs_ltss_intr_en_ecc_sec_error_enabled_f(void) +{ + return 0x1000000U; +} +static inline u32 ltc_ltcs_ltss_intr_en_ecc_ded_error_enabled_f(void) +{ + return 0x2000000U; +} +static inline u32 ltc_ltc0_lts0_intr_r(void) +{ + return 0x0014040cU; +} +static inline u32 ltc_ltcs_ltss_intr3_r(void) +{ + return 0x0017e388U; +} +static inline u32 ltc_ltcs_ltss_intr3_ecc_corrected_m(void) +{ + return 0x1U << 7U; +} +static inline u32 ltc_ltcs_ltss_intr3_ecc_uncorrected_m(void) +{ + return 0x1U << 8U; +} +static inline u32 ltc_ltc0_lts0_intr3_r(void) +{ + return 0x00140588U; +} +static inline u32 ltc_ltc0_lts0_l2_cache_ecc_status_r(void) +{ + return 0x001404f0U; +} +static inline u32 
ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_rstg_f(u32 v) +{ + return (v & 0x1U) << 1U; +} +static inline u32 ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_rstg_m(void) +{ + return 0x1U << 1U; +} +static inline u32 ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_tstg_f(u32 v) +{ + return (v & 0x1U) << 3U; +} +static inline u32 ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_tstg_m(void) +{ + return 0x1U << 3U; +} +static inline u32 ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_dstg_f(u32 v) +{ + return (v & 0x1U) << 5U; +} +static inline u32 ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_dstg_m(void) +{ + return 0x1U << 5U; +} +static inline u32 ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_rstg_f(u32 v) +{ + return (v & 0x1U) << 0U; +} +static inline u32 ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_rstg_m(void) +{ + return 0x1U << 0U; +} +static inline u32 ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_tstg_f(u32 v) +{ + return (v & 0x1U) << 2U; +} +static inline u32 ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_tstg_m(void) +{ + return 0x1U << 2U; +} +static inline u32 ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_dstg_f(u32 v) +{ + return (v & 0x1U) << 4U; +} +static inline u32 ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_dstg_m(void) +{ + return 0x1U << 4U; +} +static inline u32 ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_total_counter_overflow_f(u32 v) +{ + return (v & 0x1U) << 18U; +} +static inline u32 ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_total_counter_overflow_m(void) +{ + return 0x1U << 18U; +} +static inline u32 ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_total_counter_overflow_f(u32 v) +{ + return (v & 0x1U) << 16U; +} +static inline u32 ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_total_counter_overflow_m(void) +{ + return 0x1U << 16U; +} +static inline u32 ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_unique_counter_overflow_f(u32 v) +{ + return (v & 0x1U) << 19U; +} +static inline u32 
ltc_ltc0_lts0_l2_cache_ecc_status_uncorrected_err_unique_counter_overflow_m(void) +{ + return 0x1U << 19U; +} +static inline u32 ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_unique_counter_overflow_f(u32 v) +{ + return (v & 0x1U) << 17U; +} +static inline u32 ltc_ltc0_lts0_l2_cache_ecc_status_corrected_err_unique_counter_overflow_m(void) +{ + return 0x1U << 17U; +} +static inline u32 ltc_ltc0_lts0_l2_cache_ecc_status_reset_f(u32 v) +{ + return (v & 0x1U) << 30U; +} +static inline u32 ltc_ltc0_lts0_l2_cache_ecc_status_reset_task_f(void) +{ + return 0x40000000U; +} +static inline u32 ltc_ltc0_lts0_l2_cache_ecc_address_r(void) +{ + return 0x001404fcU; +} +static inline u32 ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_r(void) +{ + return 0x001404f4U; +} +static inline u32 ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_total_s(void) +{ + return 16U; +} +static inline u32 ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_total_f(u32 v) +{ + return (v & 0xffffU) << 0U; +} +static inline u32 ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_total_m(void) +{ + return 0xffffU << 0U; +} +static inline u32 ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_total_v(u32 r) +{ + return (r >> 0U) & 0xffffU; +} +static inline u32 ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_unique_total_s(void) +{ + return 16U; +} +static inline u32 ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_unique_total_f(u32 v) +{ + return (v & 0xffffU) << 16U; +} +static inline u32 ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_unique_total_m(void) +{ + return 0xffffU << 16U; +} +static inline u32 ltc_ltc0_lts0_l2_cache_ecc_corrected_err_count_unique_total_v(u32 r) +{ + return (r >> 16U) & 0xffffU; +} +static inline u32 ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_r(void) +{ + return 0x001404f8U; +} +static inline u32 ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_total_s(void) +{ + return 16U; +} +static inline u32 ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_total_f(u32 v) +{ + return (v & 0xffffU) 
<< 0U; +} +static inline u32 ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_total_m(void) +{ + return 0xffffU << 0U; +} +static inline u32 ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_total_v(u32 r) +{ + return (r >> 0U) & 0xffffU; +} +static inline u32 ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_unique_total_s(void) +{ + return 16U; +} +static inline u32 ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_unique_total_f(u32 v) +{ + return (v & 0xffffU) << 16U; +} +static inline u32 ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_unique_total_m(void) +{ + return 0xffffU << 16U; +} +static inline u32 ltc_ltc0_lts0_l2_cache_ecc_uncorrected_err_count_unique_total_v(u32 r) +{ + return (r >> 16U) & 0xffffU; +} +static inline u32 ltc_ltc0_lts0_dstg_ecc_report_r(void) +{ + return 0x0014051cU; +} +static inline u32 ltc_ltc0_lts0_dstg_ecc_report_sec_count_m(void) +{ + return 0xffU << 0U; +} +static inline u32 ltc_ltc0_lts0_dstg_ecc_report_sec_count_v(u32 r) +{ + return (r >> 0U) & 0xffU; +} +static inline u32 ltc_ltc0_lts0_dstg_ecc_report_ded_count_m(void) +{ + return 0xffU << 16U; +} +static inline u32 ltc_ltc0_lts0_dstg_ecc_report_ded_count_v(u32 r) +{ + return (r >> 16U) & 0xffU; +} +static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_r(void) +{ + return 0x0017e2a0U; +} +static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_v(u32 r) +{ + return (r >> 0U) & 0x1U; +} +static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_pending_v(void) +{ + return 0x00000001U; +} +static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_pending_f(void) +{ + return 0x1U; +} +static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_max_cycles_between_invalidates_v(u32 r) +{ + return (r >> 8U) & 0xfU; +} +static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_max_cycles_between_invalidates_3_v(void) +{ + return 0x00000003U; +} +static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_max_cycles_between_invalidates_3_f(void) +{ + return 0x300U; +} +static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_last_class_v(u32 r) 
+{ + return (r >> 28U) & 0x1U; +} +static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_last_class_true_v(void) +{ + return 0x00000001U; +} +static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_last_class_true_f(void) +{ + return 0x10000000U; +} +static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_normal_class_v(u32 r) +{ + return (r >> 29U) & 0x1U; +} +static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_normal_class_true_v(void) +{ + return 0x00000001U; +} +static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_normal_class_true_f(void) +{ + return 0x20000000U; +} +static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_first_class_v(u32 r) +{ + return (r >> 30U) & 0x1U; +} +static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_first_class_true_v(void) +{ + return 0x00000001U; +} +static inline u32 ltc_ltcs_ltss_tstg_cmgmt0_invalidate_evict_first_class_true_f(void) +{ + return 0x40000000U; +} +static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_r(void) +{ + return 0x0017e2a4U; +} +static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_v(u32 r) +{ + return (r >> 0U) & 0x1U; +} +static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_pending_v(void) +{ + return 0x00000001U; +} +static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_pending_f(void) +{ + return 0x1U; +} +static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_max_cycles_between_cleans_v(u32 r) +{ + return (r >> 8U) & 0xfU; +} +static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_max_cycles_between_cleans_3_v(void) +{ + return 0x00000003U; +} +static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_max_cycles_between_cleans_3_f(void) +{ + return 0x300U; +} +static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_wait_for_fb_to_pull_v(u32 r) +{ + return (r >> 16U) & 0x1U; +} +static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_wait_for_fb_to_pull_true_v(void) +{ + return 0x00000001U; +} +static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_wait_for_fb_to_pull_true_f(void) +{ + return 0x10000U; +} +static inline u32 
ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_last_class_v(u32 r) +{ + return (r >> 28U) & 0x1U; +} +static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_last_class_true_v(void) +{ + return 0x00000001U; +} +static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_last_class_true_f(void) +{ + return 0x10000000U; +} +static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_normal_class_v(u32 r) +{ + return (r >> 29U) & 0x1U; +} +static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_normal_class_true_v(void) +{ + return 0x00000001U; +} +static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_normal_class_true_f(void) +{ + return 0x20000000U; +} +static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_first_class_v(u32 r) +{ + return (r >> 30U) & 0x1U; +} +static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_first_class_true_v(void) +{ + return 0x00000001U; +} +static inline u32 ltc_ltcs_ltss_tstg_cmgmt1_clean_evict_first_class_true_f(void) +{ + return 0x40000000U; +} +static inline u32 ltc_ltc0_ltss_tstg_cmgmt0_r(void) +{ + return 0x001402a0U; +} +static inline u32 ltc_ltc0_ltss_tstg_cmgmt0_invalidate_v(u32 r) +{ + return (r >> 0U) & 0x1U; +} +static inline u32 ltc_ltc0_ltss_tstg_cmgmt0_invalidate_pending_v(void) +{ + return 0x00000001U; +} +static inline u32 ltc_ltc0_ltss_tstg_cmgmt0_invalidate_pending_f(void) +{ + return 0x1U; +} +static inline u32 ltc_ltc0_ltss_tstg_cmgmt1_r(void) +{ + return 0x001402a4U; +} +static inline u32 ltc_ltc0_ltss_tstg_cmgmt1_clean_v(u32 r) +{ + return (r >> 0U) & 0x1U; +} +static inline u32 ltc_ltc0_ltss_tstg_cmgmt1_clean_pending_v(void) +{ + return 0x00000001U; +} +static inline u32 ltc_ltc0_ltss_tstg_cmgmt1_clean_pending_f(void) +{ + return 0x1U; +} +static inline u32 ltc_ltc1_ltss_tstg_cmgmt0_r(void) +{ + return 0x001422a0U; +} +static inline u32 ltc_ltc1_ltss_tstg_cmgmt0_invalidate_v(u32 r) +{ + return (r >> 0U) & 0x1U; +} +static inline u32 ltc_ltc1_ltss_tstg_cmgmt0_invalidate_pending_v(void) +{ + return 0x00000001U; +} +static inline 
u32 ltc_ltc1_ltss_tstg_cmgmt0_invalidate_pending_f(void) +{ + return 0x1U; +} +static inline u32 ltc_ltc1_ltss_tstg_cmgmt1_r(void) +{ + return 0x001422a4U; +} +static inline u32 ltc_ltc1_ltss_tstg_cmgmt1_clean_v(u32 r) +{ + return (r >> 0U) & 0x1U; +} +static inline u32 ltc_ltc1_ltss_tstg_cmgmt1_clean_pending_v(void) +{ + return 0x00000001U; +} +static inline u32 ltc_ltc1_ltss_tstg_cmgmt1_clean_pending_f(void) +{ + return 0x1U; +} +static inline u32 ltc_ltc0_lts0_tstg_info_1_r(void) +{ + return 0x0014058cU; +} +static inline u32 ltc_ltc0_lts0_tstg_info_1_slice_size_in_kb_v(u32 r) +{ + return (r >> 0U) & 0xffffU; +} +static inline u32 ltc_ltc0_lts0_tstg_info_1_slices_per_l2_v(u32 r) +{ + return (r >> 16U) & 0x1fU; +} +#endif diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_mc_gv11b.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_mc_gv11b.h new file mode 100644 index 000000000..bff730763 --- /dev/null +++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_mc_gv11b.h @@ -0,0 +1,251 @@ +/* + * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/* + * Function naming determines intended use: + * + * _r(void) : Returns the offset for register . + * + * _o(void) : Returns the offset for element . + * + * _w(void) : Returns the word offset for word (4 byte) element . + * + * __s(void) : Returns size of field of register in bits. + * + * __f(u32 v) : Returns a value based on 'v' which has been shifted + * and masked to place it at field of register . This value + * can be |'d with others to produce a full register value for + * register . + * + * __m(void) : Returns a mask for field of register . This + * value can be ~'d and then &'d to clear the value of field for + * register . + * + * ___f(void) : Returns the constant value after being shifted + * to place it at field of register . This value can be |'d + * with others to produce a full register value for . + * + * __v(u32 r) : Returns the value of field from a full register + * value 'r' after being shifted to place its LSB at bit 0. + * This value is suitable for direct comparison with other unshifted + * values appropriate for use in field of register . + * + * ___v(void) : Returns the constant value for defined for + * field of register . This value is suitable for direct + * comparison with unshifted values appropriate for use in field + * of register . 
+ */ +#ifndef _hw_mc_gv11b_h_ +#define _hw_mc_gv11b_h_ + +static inline u32 mc_boot_0_r(void) +{ + return 0x00000000U; +} +static inline u32 mc_boot_0_architecture_v(u32 r) +{ + return (r >> 24U) & 0x1fU; +} +static inline u32 mc_boot_0_implementation_v(u32 r) +{ + return (r >> 20U) & 0xfU; +} +static inline u32 mc_boot_0_major_revision_v(u32 r) +{ + return (r >> 4U) & 0xfU; +} +static inline u32 mc_boot_0_minor_revision_v(u32 r) +{ + return (r >> 0U) & 0xfU; +} +static inline u32 mc_intr_r(u32 i) +{ + return 0x00000100U + i*4U; +} +static inline u32 mc_intr_pfifo_pending_f(void) +{ + return 0x100U; +} +static inline u32 mc_intr_hub_pending_f(void) +{ + return 0x200U; +} +static inline u32 mc_intr_pgraph_pending_f(void) +{ + return 0x1000U; +} +static inline u32 mc_intr_pmu_pending_f(void) +{ + return 0x1000000U; +} +static inline u32 mc_intr_ltc_pending_f(void) +{ + return 0x2000000U; +} +static inline u32 mc_intr_priv_ring_pending_f(void) +{ + return 0x40000000U; +} +static inline u32 mc_intr_pbus_pending_f(void) +{ + return 0x10000000U; +} +static inline u32 mc_intr_en_r(u32 i) +{ + return 0x00000140U + i*4U; +} +static inline u32 mc_intr_en_set_r(u32 i) +{ + return 0x00000160U + i*4U; +} +static inline u32 mc_intr_en_clear_r(u32 i) +{ + return 0x00000180U + i*4U; +} +static inline u32 mc_enable_r(void) +{ + return 0x00000200U; +} +static inline u32 mc_enable_xbar_enabled_f(void) +{ + return 0x4U; +} +static inline u32 mc_enable_l2_enabled_f(void) +{ + return 0x8U; +} +static inline u32 mc_enable_pmedia_s(void) +{ + return 1U; +} +static inline u32 mc_enable_pmedia_f(u32 v) +{ + return (v & 0x1U) << 4U; +} +static inline u32 mc_enable_pmedia_m(void) +{ + return 0x1U << 4U; +} +static inline u32 mc_enable_pmedia_v(u32 r) +{ + return (r >> 4U) & 0x1U; +} +static inline u32 mc_enable_ce0_m(void) +{ + return 0x1U << 6U; +} +static inline u32 mc_enable_pfifo_enabled_f(void) +{ + return 0x100U; +} +static inline u32 mc_enable_pgraph_enabled_f(void) +{ + return 
0x1000U; +} +static inline u32 mc_enable_pwr_v(u32 r) +{ + return (r >> 13U) & 0x1U; +} +static inline u32 mc_enable_pwr_disabled_v(void) +{ + return 0x00000000U; +} +static inline u32 mc_enable_pwr_enabled_f(void) +{ + return 0x2000U; +} +static inline u32 mc_enable_pfb_enabled_f(void) +{ + return 0x100000U; +} +static inline u32 mc_enable_ce2_m(void) +{ + return 0x1U << 21U; +} +static inline u32 mc_enable_ce2_enabled_f(void) +{ + return 0x200000U; +} +static inline u32 mc_enable_blg_enabled_f(void) +{ + return 0x8000000U; +} +static inline u32 mc_enable_perfmon_enabled_f(void) +{ + return 0x10000000U; +} +static inline u32 mc_enable_hub_enabled_f(void) +{ + return 0x20000000U; +} +static inline u32 mc_intr_ltc_r(void) +{ + return 0x000001c0U; +} +static inline u32 mc_enable_pb_r(void) +{ + return 0x00000204U; +} +static inline u32 mc_enable_pb_0_s(void) +{ + return 1U; +} +static inline u32 mc_enable_pb_0_f(u32 v) +{ + return (v & 0x1U) << 0U; +} +static inline u32 mc_enable_pb_0_m(void) +{ + return 0x1U << 0U; +} +static inline u32 mc_enable_pb_0_v(u32 r) +{ + return (r >> 0U) & 0x1U; +} +static inline u32 mc_enable_pb_0_enabled_v(void) +{ + return 0x00000001U; +} +static inline u32 mc_enable_pb_sel_f(u32 v, u32 i) +{ + return (v & 0x1U) << (0U + i*1U); +} +static inline u32 mc_elpg_enable_r(void) +{ + return 0x0000020cU; +} +static inline u32 mc_elpg_enable_xbar_enabled_f(void) +{ + return 0x4U; +} +static inline u32 mc_elpg_enable_pfb_enabled_f(void) +{ + return 0x100000U; +} +static inline u32 mc_elpg_enable_hub_enabled_f(void) +{ + return 0x20000000U; +} +static inline u32 mc_elpg_enable_l2_enabled_f(void) +{ + return 0x8U; +} +#endif diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_pbdma_gv11b.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_pbdma_gv11b.h new file mode 100644 index 000000000..9b9017ee6 --- /dev/null +++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_pbdma_gv11b.h @@ -0,0 +1,659 @@ +/* + * Copyright (c) 2016-2017, NVIDIA CORPORATION. 
All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/* + * Function naming determines intended use: + * + * _r(void) : Returns the offset for register . + * + * _o(void) : Returns the offset for element . + * + * _w(void) : Returns the word offset for word (4 byte) element . + * + * __s(void) : Returns size of field of register in bits. + * + * __f(u32 v) : Returns a value based on 'v' which has been shifted + * and masked to place it at field of register . This value + * can be |'d with others to produce a full register value for + * register . + * + * __m(void) : Returns a mask for field of register . This + * value can be ~'d and then &'d to clear the value of field for + * register . + * + * ___f(void) : Returns the constant value after being shifted + * to place it at field of register . This value can be |'d + * with others to produce a full register value for . 
+ * + * __v(u32 r) : Returns the value of field from a full register + * value 'r' after being shifted to place its LSB at bit 0. + * This value is suitable for direct comparison with other unshifted + * values appropriate for use in field of register . + * + * ___v(void) : Returns the constant value for defined for + * field of register . This value is suitable for direct + * comparison with unshifted values appropriate for use in field + * of register . + */ +#ifndef _hw_pbdma_gv11b_h_ +#define _hw_pbdma_gv11b_h_ + +static inline u32 pbdma_gp_entry1_r(void) +{ + return 0x10000004U; +} +static inline u32 pbdma_gp_entry1_get_hi_v(u32 r) +{ + return (r >> 0U) & 0xffU; +} +static inline u32 pbdma_gp_entry1_length_f(u32 v) +{ + return (v & 0x1fffffU) << 10U; +} +static inline u32 pbdma_gp_entry1_length_v(u32 r) +{ + return (r >> 10U) & 0x1fffffU; +} +static inline u32 pbdma_gp_base_r(u32 i) +{ + return 0x00040048U + i*8192U; +} +static inline u32 pbdma_gp_base__size_1_v(void) +{ + return 0x00000003U; +} +static inline u32 pbdma_gp_base_offset_f(u32 v) +{ + return (v & 0x1fffffffU) << 3U; +} +static inline u32 pbdma_gp_base_rsvd_s(void) +{ + return 3U; +} +static inline u32 pbdma_gp_base_hi_r(u32 i) +{ + return 0x0004004cU + i*8192U; +} +static inline u32 pbdma_gp_base_hi_offset_f(u32 v) +{ + return (v & 0xffU) << 0U; +} +static inline u32 pbdma_gp_base_hi_limit2_f(u32 v) +{ + return (v & 0x1fU) << 16U; +} +static inline u32 pbdma_gp_fetch_r(u32 i) +{ + return 0x00040050U + i*8192U; +} +static inline u32 pbdma_gp_get_r(u32 i) +{ + return 0x00040014U + i*8192U; +} +static inline u32 pbdma_gp_put_r(u32 i) +{ + return 0x00040000U + i*8192U; +} +static inline u32 pbdma_pb_fetch_r(u32 i) +{ + return 0x00040054U + i*8192U; +} +static inline u32 pbdma_pb_fetch_hi_r(u32 i) +{ + return 0x00040058U + i*8192U; +} +static inline u32 pbdma_get_r(u32 i) +{ + return 0x00040018U + i*8192U; +} +static inline u32 pbdma_get_hi_r(u32 i) +{ + return 0x0004001cU + i*8192U; +} +static inline 
u32 pbdma_put_r(u32 i) +{ + return 0x0004005cU + i*8192U; +} +static inline u32 pbdma_put_hi_r(u32 i) +{ + return 0x00040060U + i*8192U; +} +static inline u32 pbdma_pb_header_r(u32 i) +{ + return 0x00040084U + i*8192U; +} +static inline u32 pbdma_pb_header_priv_user_f(void) +{ + return 0x0U; +} +static inline u32 pbdma_pb_header_method_zero_f(void) +{ + return 0x0U; +} +static inline u32 pbdma_pb_header_subchannel_zero_f(void) +{ + return 0x0U; +} +static inline u32 pbdma_pb_header_level_main_f(void) +{ + return 0x0U; +} +static inline u32 pbdma_pb_header_first_true_f(void) +{ + return 0x400000U; +} +static inline u32 pbdma_pb_header_type_inc_f(void) +{ + return 0x20000000U; +} +static inline u32 pbdma_pb_header_type_non_inc_f(void) +{ + return 0x60000000U; +} +static inline u32 pbdma_hdr_shadow_r(u32 i) +{ + return 0x00040118U + i*8192U; +} +static inline u32 pbdma_gp_shadow_0_r(u32 i) +{ + return 0x00040110U + i*8192U; +} +static inline u32 pbdma_gp_shadow_1_r(u32 i) +{ + return 0x00040114U + i*8192U; +} +static inline u32 pbdma_subdevice_r(u32 i) +{ + return 0x00040094U + i*8192U; +} +static inline u32 pbdma_subdevice_id_f(u32 v) +{ + return (v & 0xfffU) << 0U; +} +static inline u32 pbdma_subdevice_status_active_f(void) +{ + return 0x10000000U; +} +static inline u32 pbdma_subdevice_channel_dma_enable_f(void) +{ + return 0x20000000U; +} +static inline u32 pbdma_method0_r(u32 i) +{ + return 0x000400c0U + i*8192U; +} +static inline u32 pbdma_method0_fifo_size_v(void) +{ + return 0x00000004U; +} +static inline u32 pbdma_method0_addr_f(u32 v) +{ + return (v & 0xfffU) << 2U; +} +static inline u32 pbdma_method0_addr_v(u32 r) +{ + return (r >> 2U) & 0xfffU; +} +static inline u32 pbdma_method0_subch_v(u32 r) +{ + return (r >> 16U) & 0x7U; +} +static inline u32 pbdma_method0_first_true_f(void) +{ + return 0x400000U; +} +static inline u32 pbdma_method0_valid_true_f(void) +{ + return 0x80000000U; +} +static inline u32 pbdma_method1_r(u32 i) +{ + return 0x000400c8U + 
i*8192U; +} +static inline u32 pbdma_method2_r(u32 i) +{ + return 0x000400d0U + i*8192U; +} +static inline u32 pbdma_method3_r(u32 i) +{ + return 0x000400d8U + i*8192U; +} +static inline u32 pbdma_data0_r(u32 i) +{ + return 0x000400c4U + i*8192U; +} +static inline u32 pbdma_acquire_r(u32 i) +{ + return 0x00040030U + i*8192U; +} +static inline u32 pbdma_acquire_retry_man_2_f(void) +{ + return 0x2U; +} +static inline u32 pbdma_acquire_retry_exp_2_f(void) +{ + return 0x100U; +} +static inline u32 pbdma_acquire_timeout_exp_f(u32 v) +{ + return (v & 0xfU) << 11U; +} +static inline u32 pbdma_acquire_timeout_exp_max_v(void) +{ + return 0x0000000fU; +} +static inline u32 pbdma_acquire_timeout_exp_max_f(void) +{ + return 0x7800U; +} +static inline u32 pbdma_acquire_timeout_man_f(u32 v) +{ + return (v & 0xffffU) << 15U; +} +static inline u32 pbdma_acquire_timeout_man_max_v(void) +{ + return 0x0000ffffU; +} +static inline u32 pbdma_acquire_timeout_man_max_f(void) +{ + return 0x7fff8000U; +} +static inline u32 pbdma_acquire_timeout_en_enable_f(void) +{ + return 0x80000000U; +} +static inline u32 pbdma_acquire_timeout_en_disable_f(void) +{ + return 0x0U; +} +static inline u32 pbdma_status_r(u32 i) +{ + return 0x00040100U + i*8192U; +} +static inline u32 pbdma_channel_r(u32 i) +{ + return 0x00040120U + i*8192U; +} +static inline u32 pbdma_signature_r(u32 i) +{ + return 0x00040010U + i*8192U; +} +static inline u32 pbdma_signature_hw_valid_f(void) +{ + return 0xfaceU; +} +static inline u32 pbdma_signature_sw_zero_f(void) +{ + return 0x0U; +} +static inline u32 pbdma_userd_r(u32 i) +{ + return 0x00040008U + i*8192U; +} +static inline u32 pbdma_userd_target_vid_mem_f(void) +{ + return 0x0U; +} +static inline u32 pbdma_userd_target_sys_mem_coh_f(void) +{ + return 0x2U; +} +static inline u32 pbdma_userd_target_sys_mem_ncoh_f(void) +{ + return 0x3U; +} +static inline u32 pbdma_userd_addr_f(u32 v) +{ + return (v & 0x7fffffU) << 9U; +} +static inline u32 pbdma_config_r(u32 i) +{ + return 
0x000400f4U + i*8192U; +} +static inline u32 pbdma_config_l2_evict_first_f(void) +{ + return 0x0U; +} +static inline u32 pbdma_config_l2_evict_normal_f(void) +{ + return 0x1U; +} +static inline u32 pbdma_config_l2_evict_last_f(void) +{ + return 0x2U; +} +static inline u32 pbdma_config_ce_split_enable_f(void) +{ + return 0x0U; +} +static inline u32 pbdma_config_ce_split_disable_f(void) +{ + return 0x10U; +} +static inline u32 pbdma_config_auth_level_non_privileged_f(void) +{ + return 0x0U; +} +static inline u32 pbdma_config_auth_level_privileged_f(void) +{ + return 0x100U; +} +static inline u32 pbdma_config_userd_writeback_disable_f(void) +{ + return 0x0U; +} +static inline u32 pbdma_config_userd_writeback_enable_f(void) +{ + return 0x1000U; +} +static inline u32 pbdma_userd_hi_r(u32 i) +{ + return 0x0004000cU + i*8192U; +} +static inline u32 pbdma_userd_hi_addr_f(u32 v) +{ + return (v & 0xffU) << 0U; +} +static inline u32 pbdma_hce_ctrl_r(u32 i) +{ + return 0x000400e4U + i*8192U; +} +static inline u32 pbdma_hce_ctrl_hce_priv_mode_yes_f(void) +{ + return 0x20U; +} +static inline u32 pbdma_intr_0_r(u32 i) +{ + return 0x00040108U + i*8192U; +} +static inline u32 pbdma_intr_0_memreq_v(u32 r) +{ + return (r >> 0U) & 0x1U; +} +static inline u32 pbdma_intr_0_memreq_pending_f(void) +{ + return 0x1U; +} +static inline u32 pbdma_intr_0_memack_timeout_pending_f(void) +{ + return 0x2U; +} +static inline u32 pbdma_intr_0_memack_extra_pending_f(void) +{ + return 0x4U; +} +static inline u32 pbdma_intr_0_memdat_timeout_pending_f(void) +{ + return 0x8U; +} +static inline u32 pbdma_intr_0_memdat_extra_pending_f(void) +{ + return 0x10U; +} +static inline u32 pbdma_intr_0_memflush_pending_f(void) +{ + return 0x20U; +} +static inline u32 pbdma_intr_0_memop_pending_f(void) +{ + return 0x40U; +} +static inline u32 pbdma_intr_0_lbconnect_pending_f(void) +{ + return 0x80U; +} +static inline u32 pbdma_intr_0_lbreq_pending_f(void) +{ + return 0x100U; +} +static inline u32 
pbdma_intr_0_lback_timeout_pending_f(void) +{ + return 0x200U; +} +static inline u32 pbdma_intr_0_lback_extra_pending_f(void) +{ + return 0x400U; +} +static inline u32 pbdma_intr_0_lbdat_timeout_pending_f(void) +{ + return 0x800U; +} +static inline u32 pbdma_intr_0_lbdat_extra_pending_f(void) +{ + return 0x1000U; +} +static inline u32 pbdma_intr_0_gpfifo_pending_f(void) +{ + return 0x2000U; +} +static inline u32 pbdma_intr_0_gpptr_pending_f(void) +{ + return 0x4000U; +} +static inline u32 pbdma_intr_0_gpentry_pending_f(void) +{ + return 0x8000U; +} +static inline u32 pbdma_intr_0_gpcrc_pending_f(void) +{ + return 0x10000U; +} +static inline u32 pbdma_intr_0_pbptr_pending_f(void) +{ + return 0x20000U; +} +static inline u32 pbdma_intr_0_pbentry_pending_f(void) +{ + return 0x40000U; +} +static inline u32 pbdma_intr_0_pbcrc_pending_f(void) +{ + return 0x80000U; +} +static inline u32 pbdma_intr_0_clear_faulted_error_pending_f(void) +{ + return 0x100000U; +} +static inline u32 pbdma_intr_0_method_pending_f(void) +{ + return 0x200000U; +} +static inline u32 pbdma_intr_0_methodcrc_pending_f(void) +{ + return 0x400000U; +} +static inline u32 pbdma_intr_0_device_pending_f(void) +{ + return 0x800000U; +} +static inline u32 pbdma_intr_0_eng_reset_pending_f(void) +{ + return 0x1000000U; +} +static inline u32 pbdma_intr_0_semaphore_pending_f(void) +{ + return 0x2000000U; +} +static inline u32 pbdma_intr_0_acquire_pending_f(void) +{ + return 0x4000000U; +} +static inline u32 pbdma_intr_0_pri_pending_f(void) +{ + return 0x8000000U; +} +static inline u32 pbdma_intr_0_no_ctxsw_seg_pending_f(void) +{ + return 0x20000000U; +} +static inline u32 pbdma_intr_0_pbseg_pending_f(void) +{ + return 0x40000000U; +} +static inline u32 pbdma_intr_0_signature_pending_f(void) +{ + return 0x80000000U; +} +static inline u32 pbdma_intr_1_r(u32 i) +{ + return 0x00040148U + i*8192U; +} +static inline u32 pbdma_intr_1_ctxnotvalid_m(void) +{ + return 0x1U << 31U; +} +static inline u32 
pbdma_intr_1_ctxnotvalid_pending_f(void) +{ + return 0x80000000U; +} +static inline u32 pbdma_intr_en_0_r(u32 i) +{ + return 0x0004010cU + i*8192U; +} +static inline u32 pbdma_intr_en_0_lbreq_enabled_f(void) +{ + return 0x100U; +} +static inline u32 pbdma_intr_en_1_r(u32 i) +{ + return 0x0004014cU + i*8192U; +} +static inline u32 pbdma_intr_stall_r(u32 i) +{ + return 0x0004013cU + i*8192U; +} +static inline u32 pbdma_intr_stall_lbreq_enabled_f(void) +{ + return 0x100U; +} +static inline u32 pbdma_intr_stall_1_r(u32 i) +{ + return 0x00040140U + i*8192U; +} +static inline u32 pbdma_udma_nop_r(void) +{ + return 0x00000008U; +} +static inline u32 pbdma_runlist_timeslice_r(u32 i) +{ + return 0x000400f8U + i*8192U; +} +static inline u32 pbdma_runlist_timeslice_timeout_128_f(void) +{ + return 0x80U; +} +static inline u32 pbdma_runlist_timeslice_timescale_3_f(void) +{ + return 0x3000U; +} +static inline u32 pbdma_runlist_timeslice_enable_true_f(void) +{ + return 0x10000000U; +} +static inline u32 pbdma_target_r(u32 i) +{ + return 0x000400acU + i*8192U; +} +static inline u32 pbdma_target_engine_sw_f(void) +{ + return 0x1fU; +} +static inline u32 pbdma_target_eng_ctx_valid_true_f(void) +{ + return 0x10000U; +} +static inline u32 pbdma_target_eng_ctx_valid_false_f(void) +{ + return 0x0U; +} +static inline u32 pbdma_target_ce_ctx_valid_true_f(void) +{ + return 0x20000U; +} +static inline u32 pbdma_target_ce_ctx_valid_false_f(void) +{ + return 0x0U; +} +static inline u32 pbdma_target_host_tsg_event_reason_pbdma_idle_f(void) +{ + return 0x0U; +} +static inline u32 pbdma_target_host_tsg_event_reason_semaphore_acquire_failure_f(void) +{ + return 0x1000000U; +} +static inline u32 pbdma_target_host_tsg_event_reason_tsg_yield_f(void) +{ + return 0x2000000U; +} +static inline u32 pbdma_target_host_tsg_event_reason_host_subchannel_switch_f(void) +{ + return 0x3000000U; +} +static inline u32 pbdma_target_should_send_tsg_event_true_f(void) +{ + return 0x20000000U; +} +static inline u32 
pbdma_target_should_send_tsg_event_false_f(void) +{ + return 0x0U; +} +static inline u32 pbdma_target_needs_host_tsg_event_true_f(void) +{ + return 0x80000000U; +} +static inline u32 pbdma_target_needs_host_tsg_event_false_f(void) +{ + return 0x0U; +} +static inline u32 pbdma_set_channel_info_r(u32 i) +{ + return 0x000400fcU + i*8192U; +} +static inline u32 pbdma_set_channel_info_scg_type_graphics_compute0_f(void) +{ + return 0x0U; +} +static inline u32 pbdma_set_channel_info_scg_type_compute1_f(void) +{ + return 0x1U; +} +static inline u32 pbdma_set_channel_info_veid_f(u32 v) +{ + return (v & 0x3fU) << 8U; +} +static inline u32 pbdma_timeout_r(u32 i) +{ + return 0x0004012cU + i*8192U; +} +static inline u32 pbdma_timeout_period_m(void) +{ + return 0xffffffffU << 0U; +} +static inline u32 pbdma_timeout_period_max_f(void) +{ + return 0xffffffffU; +} +static inline u32 pbdma_timeout_period_init_f(void) +{ + return 0x10000U; +} +#endif diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_perf_gv11b.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_perf_gv11b.h new file mode 100644 index 000000000..788a6ab6e --- /dev/null +++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_perf_gv11b.h @@ -0,0 +1,211 @@ +/* + * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/* + * Function naming determines intended use: + * + * _r(void) : Returns the offset for register . + * + * _o(void) : Returns the offset for element . + * + * _w(void) : Returns the word offset for word (4 byte) element . + * + * __s(void) : Returns size of field of register in bits. + * + * __f(u32 v) : Returns a value based on 'v' which has been shifted + * and masked to place it at field of register . This value + * can be |'d with others to produce a full register value for + * register . + * + * __m(void) : Returns a mask for field of register . This + * value can be ~'d and then &'d to clear the value of field for + * register . + * + * ___f(void) : Returns the constant value after being shifted + * to place it at field of register . This value can be |'d + * with others to produce a full register value for . + * + * __v(u32 r) : Returns the value of field from a full register + * value 'r' after being shifted to place its LSB at bit 0. + * This value is suitable for direct comparison with other unshifted + * values appropriate for use in field of register . + * + * ___v(void) : Returns the constant value for defined for + * field of register . This value is suitable for direct + * comparison with unshifted values appropriate for use in field + * of register . 
+ */ +#ifndef _hw_perf_gv11b_h_ +#define _hw_perf_gv11b_h_ + +static inline u32 perf_pmasys_control_r(void) +{ + return 0x0024a000U; +} +static inline u32 perf_pmasys_control_membuf_status_v(u32 r) +{ + return (r >> 4U) & 0x1U; +} +static inline u32 perf_pmasys_control_membuf_status_overflowed_v(void) +{ + return 0x00000001U; +} +static inline u32 perf_pmasys_control_membuf_status_overflowed_f(void) +{ + return 0x10U; +} +static inline u32 perf_pmasys_control_membuf_clear_status_f(u32 v) +{ + return (v & 0x1U) << 5U; +} +static inline u32 perf_pmasys_control_membuf_clear_status_v(u32 r) +{ + return (r >> 5U) & 0x1U; +} +static inline u32 perf_pmasys_control_membuf_clear_status_doit_v(void) +{ + return 0x00000001U; +} +static inline u32 perf_pmasys_control_membuf_clear_status_doit_f(void) +{ + return 0x20U; +} +static inline u32 perf_pmasys_mem_block_r(void) +{ + return 0x0024a070U; +} +static inline u32 perf_pmasys_mem_block_base_f(u32 v) +{ + return (v & 0xfffffffU) << 0U; +} +static inline u32 perf_pmasys_mem_block_target_f(u32 v) +{ + return (v & 0x3U) << 28U; +} +static inline u32 perf_pmasys_mem_block_target_v(u32 r) +{ + return (r >> 28U) & 0x3U; +} +static inline u32 perf_pmasys_mem_block_target_lfb_v(void) +{ + return 0x00000000U; +} +static inline u32 perf_pmasys_mem_block_target_lfb_f(void) +{ + return 0x0U; +} +static inline u32 perf_pmasys_mem_block_target_sys_coh_v(void) +{ + return 0x00000002U; +} +static inline u32 perf_pmasys_mem_block_target_sys_coh_f(void) +{ + return 0x20000000U; +} +static inline u32 perf_pmasys_mem_block_target_sys_ncoh_v(void) +{ + return 0x00000003U; +} +static inline u32 perf_pmasys_mem_block_target_sys_ncoh_f(void) +{ + return 0x30000000U; +} +static inline u32 perf_pmasys_mem_block_valid_f(u32 v) +{ + return (v & 0x1U) << 31U; +} +static inline u32 perf_pmasys_mem_block_valid_v(u32 r) +{ + return (r >> 31U) & 0x1U; +} +static inline u32 perf_pmasys_mem_block_valid_true_v(void) +{ + return 0x00000001U; +} +static inline u32 
perf_pmasys_mem_block_valid_true_f(void) +{ + return 0x80000000U; +} +static inline u32 perf_pmasys_mem_block_valid_false_v(void) +{ + return 0x00000000U; +} +static inline u32 perf_pmasys_mem_block_valid_false_f(void) +{ + return 0x0U; +} +static inline u32 perf_pmasys_outbase_r(void) +{ + return 0x0024a074U; +} +static inline u32 perf_pmasys_outbase_ptr_f(u32 v) +{ + return (v & 0x7ffffffU) << 5U; +} +static inline u32 perf_pmasys_outbaseupper_r(void) +{ + return 0x0024a078U; +} +static inline u32 perf_pmasys_outbaseupper_ptr_f(u32 v) +{ + return (v & 0xffU) << 0U; +} +static inline u32 perf_pmasys_outsize_r(void) +{ + return 0x0024a07cU; +} +static inline u32 perf_pmasys_outsize_numbytes_f(u32 v) +{ + return (v & 0x7ffffffU) << 5U; +} +static inline u32 perf_pmasys_mem_bytes_r(void) +{ + return 0x0024a084U; +} +static inline u32 perf_pmasys_mem_bytes_numbytes_f(u32 v) +{ + return (v & 0xfffffffU) << 4U; +} +static inline u32 perf_pmasys_mem_bump_r(void) +{ + return 0x0024a088U; +} +static inline u32 perf_pmasys_mem_bump_numbytes_f(u32 v) +{ + return (v & 0xfffffffU) << 4U; +} +static inline u32 perf_pmasys_enginestatus_r(void) +{ + return 0x0024a0a4U; +} +static inline u32 perf_pmasys_enginestatus_rbufempty_f(u32 v) +{ + return (v & 0x1U) << 4U; +} +static inline u32 perf_pmasys_enginestatus_rbufempty_empty_v(void) +{ + return 0x00000001U; +} +static inline u32 perf_pmasys_enginestatus_rbufempty_empty_f(void) +{ + return 0x10U; +} +#endif diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_pram_gv11b.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_pram_gv11b.h new file mode 100644 index 000000000..456d6316f --- /dev/null +++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_pram_gv11b.h @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/* + * Function naming determines intended use: + * + * _r(void) : Returns the offset for register . + * + * _o(void) : Returns the offset for element . + * + * _w(void) : Returns the word offset for word (4 byte) element . + * + * __s(void) : Returns size of field of register in bits. + * + * __f(u32 v) : Returns a value based on 'v' which has been shifted + * and masked to place it at field of register . This value + * can be |'d with others to produce a full register value for + * register . + * + * __m(void) : Returns a mask for field of register . This + * value can be ~'d and then &'d to clear the value of field for + * register . + * + * ___f(void) : Returns the constant value after being shifted + * to place it at field of register . This value can be |'d + * with others to produce a full register value for . 
+ * + * __v(u32 r) : Returns the value of field from a full register + * value 'r' after being shifted to place its LSB at bit 0. + * This value is suitable for direct comparison with other unshifted + * values appropriate for use in field of register . + * + * ___v(void) : Returns the constant value for defined for + * field of register . This value is suitable for direct + * comparison with unshifted values appropriate for use in field + * of register . + */ +#ifndef _hw_pram_gv11b_h_ +#define _hw_pram_gv11b_h_ + +static inline u32 pram_data032_r(u32 i) +{ + return 0x00700000U + i*4U; +} +#endif diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_pri_ringmaster_gv11b.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_pri_ringmaster_gv11b.h new file mode 100644 index 000000000..a653681d6 --- /dev/null +++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_pri_ringmaster_gv11b.h @@ -0,0 +1,167 @@ +/* + * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/* + * Function naming determines intended use: + * + * _r(void) : Returns the offset for register . + * + * _o(void) : Returns the offset for element . + * + * _w(void) : Returns the word offset for word (4 byte) element . + * + * __s(void) : Returns size of field of register in bits. + * + * __f(u32 v) : Returns a value based on 'v' which has been shifted + * and masked to place it at field of register . This value + * can be |'d with others to produce a full register value for + * register . + * + * __m(void) : Returns a mask for field of register . This + * value can be ~'d and then &'d to clear the value of field for + * register . + * + * ___f(void) : Returns the constant value after being shifted + * to place it at field of register . This value can be |'d + * with others to produce a full register value for . + * + * __v(u32 r) : Returns the value of field from a full register + * value 'r' after being shifted to place its LSB at bit 0. + * This value is suitable for direct comparison with other unshifted + * values appropriate for use in field of register . + * + * ___v(void) : Returns the constant value for defined for + * field of register . This value is suitable for direct + * comparison with unshifted values appropriate for use in field + * of register . 
+ */ +#ifndef _hw_pri_ringmaster_gv11b_h_ +#define _hw_pri_ringmaster_gv11b_h_ + +static inline u32 pri_ringmaster_command_r(void) +{ + return 0x0012004cU; +} +static inline u32 pri_ringmaster_command_cmd_m(void) +{ + return 0x3fU << 0U; +} +static inline u32 pri_ringmaster_command_cmd_v(u32 r) +{ + return (r >> 0U) & 0x3fU; +} +static inline u32 pri_ringmaster_command_cmd_no_cmd_v(void) +{ + return 0x00000000U; +} +static inline u32 pri_ringmaster_command_cmd_start_ring_f(void) +{ + return 0x1U; +} +static inline u32 pri_ringmaster_command_cmd_ack_interrupt_f(void) +{ + return 0x2U; +} +static inline u32 pri_ringmaster_command_cmd_enumerate_stations_f(void) +{ + return 0x3U; +} +static inline u32 pri_ringmaster_command_cmd_enumerate_stations_bc_grp_all_f(void) +{ + return 0x0U; +} +static inline u32 pri_ringmaster_command_data_r(void) +{ + return 0x00120048U; +} +static inline u32 pri_ringmaster_start_results_r(void) +{ + return 0x00120050U; +} +static inline u32 pri_ringmaster_start_results_connectivity_v(u32 r) +{ + return (r >> 0U) & 0x1U; +} +static inline u32 pri_ringmaster_start_results_connectivity_pass_v(void) +{ + return 0x00000001U; +} +static inline u32 pri_ringmaster_intr_status0_r(void) +{ + return 0x00120058U; +} +static inline u32 pri_ringmaster_intr_status0_ring_start_conn_fault_v(u32 r) +{ + return (r >> 0U) & 0x1U; +} +static inline u32 pri_ringmaster_intr_status0_disconnect_fault_v(u32 r) +{ + return (r >> 1U) & 0x1U; +} +static inline u32 pri_ringmaster_intr_status0_overflow_fault_v(u32 r) +{ + return (r >> 2U) & 0x1U; +} +static inline u32 pri_ringmaster_intr_status0_gbl_write_error_sys_v(u32 r) +{ + return (r >> 8U) & 0x1U; +} +static inline u32 pri_ringmaster_intr_status1_r(void) +{ + return 0x0012005cU; +} +static inline u32 pri_ringmaster_global_ctl_r(void) +{ + return 0x00120060U; +} +static inline u32 pri_ringmaster_global_ctl_ring_reset_asserted_f(void) +{ + return 0x1U; +} +static inline u32 
pri_ringmaster_global_ctl_ring_reset_deasserted_f(void) +{ + return 0x0U; +} +static inline u32 pri_ringmaster_enum_fbp_r(void) +{ + return 0x00120074U; +} +static inline u32 pri_ringmaster_enum_fbp_count_v(u32 r) +{ + return (r >> 0U) & 0x1fU; +} +static inline u32 pri_ringmaster_enum_gpc_r(void) +{ + return 0x00120078U; +} +static inline u32 pri_ringmaster_enum_gpc_count_v(u32 r) +{ + return (r >> 0U) & 0x1fU; +} +static inline u32 pri_ringmaster_enum_ltc_r(void) +{ + return 0x0012006cU; +} +static inline u32 pri_ringmaster_enum_ltc_count_v(u32 r) +{ + return (r >> 0U) & 0x1fU; +} +#endif diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_pri_ringstation_gpc_gv11b.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_pri_ringstation_gpc_gv11b.h new file mode 100644 index 000000000..47da22c04 --- /dev/null +++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_pri_ringstation_gpc_gv11b.h @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/* + * Function naming determines intended use: + * + * _r(void) : Returns the offset for register . + * + * _o(void) : Returns the offset for element . + * + * _w(void) : Returns the word offset for word (4 byte) element . + * + * __s(void) : Returns size of field of register in bits. + * + * __f(u32 v) : Returns a value based on 'v' which has been shifted + * and masked to place it at field of register . This value + * can be |'d with others to produce a full register value for + * register . + * + * __m(void) : Returns a mask for field of register . This + * value can be ~'d and then &'d to clear the value of field for + * register . + * + * ___f(void) : Returns the constant value after being shifted + * to place it at field of register . This value can be |'d + * with others to produce a full register value for . + * + * __v(u32 r) : Returns the value of field from a full register + * value 'r' after being shifted to place its LSB at bit 0. + * This value is suitable for direct comparison with other unshifted + * values appropriate for use in field of register . + * + * ___v(void) : Returns the constant value for defined for + * field of register . This value is suitable for direct + * comparison with unshifted values appropriate for use in field + * of register . 
+ */ +#ifndef _hw_pri_ringstation_gpc_gv11b_h_ +#define _hw_pri_ringstation_gpc_gv11b_h_ + +static inline u32 pri_ringstation_gpc_master_config_r(u32 i) +{ + return 0x00128300U + i*4U; +} +static inline u32 pri_ringstation_gpc_gpc0_priv_error_adr_r(void) +{ + return 0x00128120U; +} +static inline u32 pri_ringstation_gpc_gpc0_priv_error_wrdat_r(void) +{ + return 0x00128124U; +} +static inline u32 pri_ringstation_gpc_gpc0_priv_error_info_r(void) +{ + return 0x00128128U; +} +static inline u32 pri_ringstation_gpc_gpc0_priv_error_code_r(void) +{ + return 0x0012812cU; +} +#endif diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_pri_ringstation_sys_gv11b.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_pri_ringstation_sys_gv11b.h new file mode 100644 index 000000000..622b6d7bc --- /dev/null +++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_pri_ringstation_sys_gv11b.h @@ -0,0 +1,91 @@ +/* + * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/* + * Function naming determines intended use: + * + * _r(void) : Returns the offset for register . + * + * _o(void) : Returns the offset for element . + * + * _w(void) : Returns the word offset for word (4 byte) element . + * + * __s(void) : Returns size of field of register in bits. + * + * __f(u32 v) : Returns a value based on 'v' which has been shifted + * and masked to place it at field of register . This value + * can be |'d with others to produce a full register value for + * register . + * + * __m(void) : Returns a mask for field of register . This + * value can be ~'d and then &'d to clear the value of field for + * register . + * + * ___f(void) : Returns the constant value after being shifted + * to place it at field of register . This value can be |'d + * with others to produce a full register value for . + * + * __v(u32 r) : Returns the value of field from a full register + * value 'r' after being shifted to place its LSB at bit 0. + * This value is suitable for direct comparison with other unshifted + * values appropriate for use in field of register . + * + * ___v(void) : Returns the constant value for defined for + * field of register . This value is suitable for direct + * comparison with unshifted values appropriate for use in field + * of register . 
+ */ +#ifndef _hw_pri_ringstation_sys_gv11b_h_ +#define _hw_pri_ringstation_sys_gv11b_h_ + +static inline u32 pri_ringstation_sys_master_config_r(u32 i) +{ + return 0x00122300U + i*4U; +} +static inline u32 pri_ringstation_sys_decode_config_r(void) +{ + return 0x00122204U; +} +static inline u32 pri_ringstation_sys_decode_config_ring_m(void) +{ + return 0x7U << 0U; +} +static inline u32 pri_ringstation_sys_decode_config_ring_drop_on_ring_not_started_f(void) +{ + return 0x1U; +} +static inline u32 pri_ringstation_sys_priv_error_adr_r(void) +{ + return 0x00122120U; +} +static inline u32 pri_ringstation_sys_priv_error_wrdat_r(void) +{ + return 0x00122124U; +} +static inline u32 pri_ringstation_sys_priv_error_info_r(void) +{ + return 0x00122128U; +} +static inline u32 pri_ringstation_sys_priv_error_code_r(void) +{ + return 0x0012212cU; +} +#endif diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_proj_gv11b.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_proj_gv11b.h new file mode 100644 index 000000000..808fe3167 --- /dev/null +++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_proj_gv11b.h @@ -0,0 +1,187 @@ +/* + * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/* + * Function naming determines intended use: + * + * _r(void) : Returns the offset for register . + * + * _o(void) : Returns the offset for element . + * + * _w(void) : Returns the word offset for word (4 byte) element . + * + * __s(void) : Returns size of field of register in bits. + * + * __f(u32 v) : Returns a value based on 'v' which has been shifted + * and masked to place it at field of register . This value + * can be |'d with others to produce a full register value for + * register . + * + * __m(void) : Returns a mask for field of register . This + * value can be ~'d and then &'d to clear the value of field for + * register . + * + * ___f(void) : Returns the constant value after being shifted + * to place it at field of register . This value can be |'d + * with others to produce a full register value for . + * + * __v(u32 r) : Returns the value of field from a full register + * value 'r' after being shifted to place its LSB at bit 0. + * This value is suitable for direct comparison with other unshifted + * values appropriate for use in field of register . + * + * ___v(void) : Returns the constant value for defined for + * field of register . This value is suitable for direct + * comparison with unshifted values appropriate for use in field + * of register . 
+ */ +#ifndef _hw_proj_gv11b_h_ +#define _hw_proj_gv11b_h_ + +static inline u32 proj_gpc_base_v(void) +{ + return 0x00500000U; +} +static inline u32 proj_gpc_shared_base_v(void) +{ + return 0x00418000U; +} +static inline u32 proj_gpc_stride_v(void) +{ + return 0x00008000U; +} +static inline u32 proj_ltc_stride_v(void) +{ + return 0x00002000U; +} +static inline u32 proj_lts_stride_v(void) +{ + return 0x00000200U; +} +static inline u32 proj_fbpa_stride_v(void) +{ + return 0x00004000U; +} +static inline u32 proj_ppc_in_gpc_base_v(void) +{ + return 0x00003000U; +} +static inline u32 proj_ppc_in_gpc_shared_base_v(void) +{ + return 0x00003e00U; +} +static inline u32 proj_ppc_in_gpc_stride_v(void) +{ + return 0x00000200U; +} +static inline u32 proj_rop_base_v(void) +{ + return 0x00410000U; +} +static inline u32 proj_rop_shared_base_v(void) +{ + return 0x00408800U; +} +static inline u32 proj_rop_stride_v(void) +{ + return 0x00000400U; +} +static inline u32 proj_tpc_in_gpc_base_v(void) +{ + return 0x00004000U; +} +static inline u32 proj_tpc_in_gpc_stride_v(void) +{ + return 0x00000800U; +} +static inline u32 proj_tpc_in_gpc_shared_base_v(void) +{ + return 0x00001800U; +} +static inline u32 proj_smpc_base_v(void) +{ + return 0x00000200U; +} +static inline u32 proj_smpc_shared_base_v(void) +{ + return 0x00000300U; +} +static inline u32 proj_smpc_unique_base_v(void) +{ + return 0x00000600U; +} +static inline u32 proj_smpc_stride_v(void) +{ + return 0x00000100U; +} +static inline u32 proj_host_num_engines_v(void) +{ + return 0x00000004U; +} +static inline u32 proj_host_num_pbdma_v(void) +{ + return 0x00000003U; +} +static inline u32 proj_scal_litter_num_tpc_per_gpc_v(void) +{ + return 0x00000004U; +} +static inline u32 proj_scal_litter_num_fbps_v(void) +{ + return 0x00000001U; +} +static inline u32 proj_scal_litter_num_fbpas_v(void) +{ + return 0x00000001U; +} +static inline u32 proj_scal_litter_num_gpcs_v(void) +{ + return 0x00000001U; +} +static inline u32 
proj_scal_litter_num_pes_per_gpc_v(void) +{ + return 0x00000002U; +} +static inline u32 proj_scal_litter_num_tpcs_per_pes_v(void) +{ + return 0x00000002U; +} +static inline u32 proj_scal_litter_num_zcull_banks_v(void) +{ + return 0x00000004U; +} +static inline u32 proj_scal_litter_num_sm_per_tpc_v(void) +{ + return 0x00000002U; +} +static inline u32 proj_scal_max_gpcs_v(void) +{ + return 0x00000020U; +} +static inline u32 proj_scal_max_tpc_per_gpc_v(void) +{ + return 0x00000008U; +} +static inline u32 proj_sm_stride_v(void) +{ + return 0x00000080U; +} +#endif diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_pwr_gv11b.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_pwr_gv11b.h new file mode 100644 index 000000000..eba6d8067 --- /dev/null +++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_pwr_gv11b.h @@ -0,0 +1,951 @@ +/* + * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/* + * Function naming determines intended use: + * + * _r(void) : Returns the offset for register . + * + * _o(void) : Returns the offset for element . + * + * _w(void) : Returns the word offset for word (4 byte) element . + * + * __s(void) : Returns size of field of register in bits. + * + * __f(u32 v) : Returns a value based on 'v' which has been shifted + * and masked to place it at field of register . This value + * can be |'d with others to produce a full register value for + * register . + * + * __m(void) : Returns a mask for field of register . This + * value can be ~'d and then &'d to clear the value of field for + * register . + * + * ___f(void) : Returns the constant value after being shifted + * to place it at field of register . This value can be |'d + * with others to produce a full register value for . + * + * __v(u32 r) : Returns the value of field from a full register + * value 'r' after being shifted to place its LSB at bit 0. + * This value is suitable for direct comparison with other unshifted + * values appropriate for use in field of register . + * + * ___v(void) : Returns the constant value for defined for + * field of register . This value is suitable for direct + * comparison with unshifted values appropriate for use in field + * of register . 
+ */ +#ifndef _hw_pwr_gv11b_h_ +#define _hw_pwr_gv11b_h_ + +static inline u32 pwr_falcon_irqsset_r(void) +{ + return 0x0010a000U; +} +static inline u32 pwr_falcon_irqsset_swgen0_set_f(void) +{ + return 0x40U; +} +static inline u32 pwr_falcon_irqsclr_r(void) +{ + return 0x0010a004U; +} +static inline u32 pwr_falcon_irqstat_r(void) +{ + return 0x0010a008U; +} +static inline u32 pwr_falcon_irqstat_halt_true_f(void) +{ + return 0x10U; +} +static inline u32 pwr_falcon_irqstat_exterr_true_f(void) +{ + return 0x20U; +} +static inline u32 pwr_falcon_irqstat_swgen0_true_f(void) +{ + return 0x40U; +} +static inline u32 pwr_falcon_irqstat_ext_second_true_f(void) +{ + return 0x800U; +} +static inline u32 pwr_falcon_irqmode_r(void) +{ + return 0x0010a00cU; +} +static inline u32 pwr_falcon_irqmset_r(void) +{ + return 0x0010a010U; +} +static inline u32 pwr_falcon_irqmset_gptmr_f(u32 v) +{ + return (v & 0x1U) << 0U; +} +static inline u32 pwr_falcon_irqmset_wdtmr_f(u32 v) +{ + return (v & 0x1U) << 1U; +} +static inline u32 pwr_falcon_irqmset_mthd_f(u32 v) +{ + return (v & 0x1U) << 2U; +} +static inline u32 pwr_falcon_irqmset_ctxsw_f(u32 v) +{ + return (v & 0x1U) << 3U; +} +static inline u32 pwr_falcon_irqmset_halt_f(u32 v) +{ + return (v & 0x1U) << 4U; +} +static inline u32 pwr_falcon_irqmset_exterr_f(u32 v) +{ + return (v & 0x1U) << 5U; +} +static inline u32 pwr_falcon_irqmset_swgen0_f(u32 v) +{ + return (v & 0x1U) << 6U; +} +static inline u32 pwr_falcon_irqmset_swgen1_f(u32 v) +{ + return (v & 0x1U) << 7U; +} +static inline u32 pwr_falcon_irqmset_ext_f(u32 v) +{ + return (v & 0xffU) << 8U; +} +static inline u32 pwr_falcon_irqmset_ext_ctxe_f(u32 v) +{ + return (v & 0x1U) << 8U; +} +static inline u32 pwr_falcon_irqmset_ext_limitv_f(u32 v) +{ + return (v & 0x1U) << 9U; +} +static inline u32 pwr_falcon_irqmset_ext_second_f(u32 v) +{ + return (v & 0x1U) << 11U; +} +static inline u32 pwr_falcon_irqmset_ext_therm_f(u32 v) +{ + return (v & 0x1U) << 12U; +} +static inline u32 
pwr_falcon_irqmset_ext_miscio_f(u32 v) +{ + return (v & 0x1U) << 13U; +} +static inline u32 pwr_falcon_irqmset_ext_rttimer_f(u32 v) +{ + return (v & 0x1U) << 14U; +} +static inline u32 pwr_falcon_irqmset_ext_rsvd8_f(u32 v) +{ + return (v & 0x1U) << 15U; +} +static inline u32 pwr_falcon_irqmclr_r(void) +{ + return 0x0010a014U; +} +static inline u32 pwr_falcon_irqmclr_gptmr_f(u32 v) +{ + return (v & 0x1U) << 0U; +} +static inline u32 pwr_falcon_irqmclr_wdtmr_f(u32 v) +{ + return (v & 0x1U) << 1U; +} +static inline u32 pwr_falcon_irqmclr_mthd_f(u32 v) +{ + return (v & 0x1U) << 2U; +} +static inline u32 pwr_falcon_irqmclr_ctxsw_f(u32 v) +{ + return (v & 0x1U) << 3U; +} +static inline u32 pwr_falcon_irqmclr_halt_f(u32 v) +{ + return (v & 0x1U) << 4U; +} +static inline u32 pwr_falcon_irqmclr_exterr_f(u32 v) +{ + return (v & 0x1U) << 5U; +} +static inline u32 pwr_falcon_irqmclr_swgen0_f(u32 v) +{ + return (v & 0x1U) << 6U; +} +static inline u32 pwr_falcon_irqmclr_swgen1_f(u32 v) +{ + return (v & 0x1U) << 7U; +} +static inline u32 pwr_falcon_irqmclr_ext_f(u32 v) +{ + return (v & 0xffU) << 8U; +} +static inline u32 pwr_falcon_irqmclr_ext_ctxe_f(u32 v) +{ + return (v & 0x1U) << 8U; +} +static inline u32 pwr_falcon_irqmclr_ext_limitv_f(u32 v) +{ + return (v & 0x1U) << 9U; +} +static inline u32 pwr_falcon_irqmclr_ext_second_f(u32 v) +{ + return (v & 0x1U) << 11U; +} +static inline u32 pwr_falcon_irqmclr_ext_therm_f(u32 v) +{ + return (v & 0x1U) << 12U; +} +static inline u32 pwr_falcon_irqmclr_ext_miscio_f(u32 v) +{ + return (v & 0x1U) << 13U; +} +static inline u32 pwr_falcon_irqmclr_ext_rttimer_f(u32 v) +{ + return (v & 0x1U) << 14U; +} +static inline u32 pwr_falcon_irqmclr_ext_rsvd8_f(u32 v) +{ + return (v & 0x1U) << 15U; +} +static inline u32 pwr_falcon_irqmask_r(void) +{ + return 0x0010a018U; +} +static inline u32 pwr_falcon_irqdest_r(void) +{ + return 0x0010a01cU; +} +static inline u32 pwr_falcon_irqdest_host_gptmr_f(u32 v) +{ + return (v & 0x1U) << 0U; +} +static inline 
u32 pwr_falcon_irqdest_host_wdtmr_f(u32 v) +{ + return (v & 0x1U) << 1U; +} +static inline u32 pwr_falcon_irqdest_host_mthd_f(u32 v) +{ + return (v & 0x1U) << 2U; +} +static inline u32 pwr_falcon_irqdest_host_ctxsw_f(u32 v) +{ + return (v & 0x1U) << 3U; +} +static inline u32 pwr_falcon_irqdest_host_halt_f(u32 v) +{ + return (v & 0x1U) << 4U; +} +static inline u32 pwr_falcon_irqdest_host_exterr_f(u32 v) +{ + return (v & 0x1U) << 5U; +} +static inline u32 pwr_falcon_irqdest_host_swgen0_f(u32 v) +{ + return (v & 0x1U) << 6U; +} +static inline u32 pwr_falcon_irqdest_host_swgen1_f(u32 v) +{ + return (v & 0x1U) << 7U; +} +static inline u32 pwr_falcon_irqdest_host_ext_f(u32 v) +{ + return (v & 0xffU) << 8U; +} +static inline u32 pwr_falcon_irqdest_host_ext_ctxe_f(u32 v) +{ + return (v & 0x1U) << 8U; +} +static inline u32 pwr_falcon_irqdest_host_ext_limitv_f(u32 v) +{ + return (v & 0x1U) << 9U; +} +static inline u32 pwr_falcon_irqdest_host_ext_second_f(u32 v) +{ + return (v & 0x1U) << 11U; +} +static inline u32 pwr_falcon_irqdest_host_ext_therm_f(u32 v) +{ + return (v & 0x1U) << 12U; +} +static inline u32 pwr_falcon_irqdest_host_ext_miscio_f(u32 v) +{ + return (v & 0x1U) << 13U; +} +static inline u32 pwr_falcon_irqdest_host_ext_rttimer_f(u32 v) +{ + return (v & 0x1U) << 14U; +} +static inline u32 pwr_falcon_irqdest_host_ext_rsvd8_f(u32 v) +{ + return (v & 0x1U) << 15U; +} +static inline u32 pwr_falcon_irqdest_target_gptmr_f(u32 v) +{ + return (v & 0x1U) << 16U; +} +static inline u32 pwr_falcon_irqdest_target_wdtmr_f(u32 v) +{ + return (v & 0x1U) << 17U; +} +static inline u32 pwr_falcon_irqdest_target_mthd_f(u32 v) +{ + return (v & 0x1U) << 18U; +} +static inline u32 pwr_falcon_irqdest_target_ctxsw_f(u32 v) +{ + return (v & 0x1U) << 19U; +} +static inline u32 pwr_falcon_irqdest_target_halt_f(u32 v) +{ + return (v & 0x1U) << 20U; +} +static inline u32 pwr_falcon_irqdest_target_exterr_f(u32 v) +{ + return (v & 0x1U) << 21U; +} +static inline u32 
pwr_falcon_irqdest_target_swgen0_f(u32 v) +{ + return (v & 0x1U) << 22U; +} +static inline u32 pwr_falcon_irqdest_target_swgen1_f(u32 v) +{ + return (v & 0x1U) << 23U; +} +static inline u32 pwr_falcon_irqdest_target_ext_f(u32 v) +{ + return (v & 0xffU) << 24U; +} +static inline u32 pwr_falcon_irqdest_target_ext_ctxe_f(u32 v) +{ + return (v & 0x1U) << 24U; +} +static inline u32 pwr_falcon_irqdest_target_ext_limitv_f(u32 v) +{ + return (v & 0x1U) << 25U; +} +static inline u32 pwr_falcon_irqdest_target_ext_second_f(u32 v) +{ + return (v & 0x1U) << 27U; +} +static inline u32 pwr_falcon_irqdest_target_ext_therm_f(u32 v) +{ + return (v & 0x1U) << 28U; +} +static inline u32 pwr_falcon_irqdest_target_ext_miscio_f(u32 v) +{ + return (v & 0x1U) << 29U; +} +static inline u32 pwr_falcon_irqdest_target_ext_rttimer_f(u32 v) +{ + return (v & 0x1U) << 30U; +} +static inline u32 pwr_falcon_irqdest_target_ext_rsvd8_f(u32 v) +{ + return (v & 0x1U) << 31U; +} +static inline u32 pwr_falcon_curctx_r(void) +{ + return 0x0010a050U; +} +static inline u32 pwr_falcon_nxtctx_r(void) +{ + return 0x0010a054U; +} +static inline u32 pwr_falcon_mailbox0_r(void) +{ + return 0x0010a040U; +} +static inline u32 pwr_falcon_mailbox1_r(void) +{ + return 0x0010a044U; +} +static inline u32 pwr_falcon_itfen_r(void) +{ + return 0x0010a048U; +} +static inline u32 pwr_falcon_itfen_ctxen_enable_f(void) +{ + return 0x1U; +} +static inline u32 pwr_falcon_idlestate_r(void) +{ + return 0x0010a04cU; +} +static inline u32 pwr_falcon_idlestate_falcon_busy_v(u32 r) +{ + return (r >> 0U) & 0x1U; +} +static inline u32 pwr_falcon_idlestate_ext_busy_v(u32 r) +{ + return (r >> 1U) & 0x7fffU; +} +static inline u32 pwr_falcon_os_r(void) +{ + return 0x0010a080U; +} +static inline u32 pwr_falcon_engctl_r(void) +{ + return 0x0010a0a4U; +} +static inline u32 pwr_falcon_cpuctl_r(void) +{ + return 0x0010a100U; +} +static inline u32 pwr_falcon_cpuctl_startcpu_f(u32 v) +{ + return (v & 0x1U) << 1U; +} +static inline u32 
pwr_falcon_cpuctl_halt_intr_f(u32 v) +{ + return (v & 0x1U) << 4U; +} +static inline u32 pwr_falcon_cpuctl_halt_intr_m(void) +{ + return 0x1U << 4U; +} +static inline u32 pwr_falcon_cpuctl_halt_intr_v(u32 r) +{ + return (r >> 4U) & 0x1U; +} +static inline u32 pwr_falcon_cpuctl_cpuctl_alias_en_f(u32 v) +{ + return (v & 0x1U) << 6U; +} +static inline u32 pwr_falcon_cpuctl_cpuctl_alias_en_m(void) +{ + return 0x1U << 6U; +} +static inline u32 pwr_falcon_cpuctl_cpuctl_alias_en_v(u32 r) +{ + return (r >> 6U) & 0x1U; +} +static inline u32 pwr_falcon_cpuctl_alias_r(void) +{ + return 0x0010a130U; +} +static inline u32 pwr_falcon_cpuctl_alias_startcpu_f(u32 v) +{ + return (v & 0x1U) << 1U; +} +static inline u32 pwr_pmu_scpctl_stat_r(void) +{ + return 0x0010ac08U; +} +static inline u32 pwr_pmu_scpctl_stat_debug_mode_f(u32 v) +{ + return (v & 0x1U) << 20U; +} +static inline u32 pwr_pmu_scpctl_stat_debug_mode_m(void) +{ + return 0x1U << 20U; +} +static inline u32 pwr_pmu_scpctl_stat_debug_mode_v(u32 r) +{ + return (r >> 20U) & 0x1U; +} +static inline u32 pwr_falcon_imemc_r(u32 i) +{ + return 0x0010a180U + i*16U; +} +static inline u32 pwr_falcon_imemc_offs_f(u32 v) +{ + return (v & 0x3fU) << 2U; +} +static inline u32 pwr_falcon_imemc_blk_f(u32 v) +{ + return (v & 0xffU) << 8U; +} +static inline u32 pwr_falcon_imemc_aincw_f(u32 v) +{ + return (v & 0x1U) << 24U; +} +static inline u32 pwr_falcon_imemd_r(u32 i) +{ + return 0x0010a184U + i*16U; +} +static inline u32 pwr_falcon_imemt_r(u32 i) +{ + return 0x0010a188U + i*16U; +} +static inline u32 pwr_falcon_sctl_r(void) +{ + return 0x0010a240U; +} +static inline u32 pwr_falcon_mmu_phys_sec_r(void) +{ + return 0x00100ce4U; +} +static inline u32 pwr_falcon_bootvec_r(void) +{ + return 0x0010a104U; +} +static inline u32 pwr_falcon_bootvec_vec_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 pwr_falcon_dmactl_r(void) +{ + return 0x0010a10cU; +} +static inline u32 pwr_falcon_dmactl_dmem_scrubbing_m(void) +{ + return 0x1U 
<< 1U; +} +static inline u32 pwr_falcon_dmactl_imem_scrubbing_m(void) +{ + return 0x1U << 2U; +} +static inline u32 pwr_falcon_hwcfg_r(void) +{ + return 0x0010a108U; +} +static inline u32 pwr_falcon_hwcfg_imem_size_v(u32 r) +{ + return (r >> 0U) & 0x1ffU; +} +static inline u32 pwr_falcon_hwcfg_dmem_size_v(u32 r) +{ + return (r >> 9U) & 0x1ffU; +} +static inline u32 pwr_falcon_dmatrfbase_r(void) +{ + return 0x0010a110U; +} +static inline u32 pwr_falcon_dmatrfbase1_r(void) +{ + return 0x0010a128U; +} +static inline u32 pwr_falcon_dmatrfmoffs_r(void) +{ + return 0x0010a114U; +} +static inline u32 pwr_falcon_dmatrfcmd_r(void) +{ + return 0x0010a118U; +} +static inline u32 pwr_falcon_dmatrfcmd_imem_f(u32 v) +{ + return (v & 0x1U) << 4U; +} +static inline u32 pwr_falcon_dmatrfcmd_write_f(u32 v) +{ + return (v & 0x1U) << 5U; +} +static inline u32 pwr_falcon_dmatrfcmd_size_f(u32 v) +{ + return (v & 0x7U) << 8U; +} +static inline u32 pwr_falcon_dmatrfcmd_ctxdma_f(u32 v) +{ + return (v & 0x7U) << 12U; +} +static inline u32 pwr_falcon_dmatrffboffs_r(void) +{ + return 0x0010a11cU; +} +static inline u32 pwr_falcon_exterraddr_r(void) +{ + return 0x0010a168U; +} +static inline u32 pwr_falcon_exterrstat_r(void) +{ + return 0x0010a16cU; +} +static inline u32 pwr_falcon_exterrstat_valid_m(void) +{ + return 0x1U << 31U; +} +static inline u32 pwr_falcon_exterrstat_valid_v(u32 r) +{ + return (r >> 31U) & 0x1U; +} +static inline u32 pwr_falcon_exterrstat_valid_true_v(void) +{ + return 0x00000001U; +} +static inline u32 pwr_pmu_falcon_icd_cmd_r(void) +{ + return 0x0010a200U; +} +static inline u32 pwr_pmu_falcon_icd_cmd_opc_s(void) +{ + return 4U; +} +static inline u32 pwr_pmu_falcon_icd_cmd_opc_f(u32 v) +{ + return (v & 0xfU) << 0U; +} +static inline u32 pwr_pmu_falcon_icd_cmd_opc_m(void) +{ + return 0xfU << 0U; +} +static inline u32 pwr_pmu_falcon_icd_cmd_opc_v(u32 r) +{ + return (r >> 0U) & 0xfU; +} +static inline u32 pwr_pmu_falcon_icd_cmd_opc_rreg_f(void) +{ + return 0x8U; +} +static 
inline u32 pwr_pmu_falcon_icd_cmd_opc_rstat_f(void) +{ + return 0xeU; +} +static inline u32 pwr_pmu_falcon_icd_cmd_idx_f(u32 v) +{ + return (v & 0x1fU) << 8U; +} +static inline u32 pwr_pmu_falcon_icd_rdata_r(void) +{ + return 0x0010a20cU; +} +static inline u32 pwr_falcon_dmemc_r(u32 i) +{ + return 0x0010a1c0U + i*8U; +} +static inline u32 pwr_falcon_dmemc_offs_f(u32 v) +{ + return (v & 0x3fU) << 2U; +} +static inline u32 pwr_falcon_dmemc_offs_m(void) +{ + return 0x3fU << 2U; +} +static inline u32 pwr_falcon_dmemc_blk_f(u32 v) +{ + return (v & 0xffU) << 8U; +} +static inline u32 pwr_falcon_dmemc_blk_m(void) +{ + return 0xffU << 8U; +} +static inline u32 pwr_falcon_dmemc_aincw_f(u32 v) +{ + return (v & 0x1U) << 24U; +} +static inline u32 pwr_falcon_dmemc_aincr_f(u32 v) +{ + return (v & 0x1U) << 25U; +} +static inline u32 pwr_falcon_dmemd_r(u32 i) +{ + return 0x0010a1c4U + i*8U; +} +static inline u32 pwr_pmu_new_instblk_r(void) +{ + return 0x0010a480U; +} +static inline u32 pwr_pmu_new_instblk_ptr_f(u32 v) +{ + return (v & 0xfffffffU) << 0U; +} +static inline u32 pwr_pmu_new_instblk_target_fb_f(void) +{ + return 0x0U; +} +static inline u32 pwr_pmu_new_instblk_target_sys_coh_f(void) +{ + return 0x20000000U; +} +static inline u32 pwr_pmu_new_instblk_target_sys_ncoh_f(void) +{ + return 0x30000000U; +} +static inline u32 pwr_pmu_new_instblk_valid_f(u32 v) +{ + return (v & 0x1U) << 30U; +} +static inline u32 pwr_pmu_mutex_id_r(void) +{ + return 0x0010a488U; +} +static inline u32 pwr_pmu_mutex_id_value_v(u32 r) +{ + return (r >> 0U) & 0xffU; +} +static inline u32 pwr_pmu_mutex_id_value_init_v(void) +{ + return 0x00000000U; +} +static inline u32 pwr_pmu_mutex_id_value_not_avail_v(void) +{ + return 0x000000ffU; +} +static inline u32 pwr_pmu_mutex_id_release_r(void) +{ + return 0x0010a48cU; +} +static inline u32 pwr_pmu_mutex_id_release_value_f(u32 v) +{ + return (v & 0xffU) << 0U; +} +static inline u32 pwr_pmu_mutex_id_release_value_m(void) +{ + return 0xffU << 0U; +} +static 
inline u32 pwr_pmu_mutex_id_release_value_init_v(void) +{ + return 0x00000000U; +} +static inline u32 pwr_pmu_mutex_id_release_value_init_f(void) +{ + return 0x0U; +} +static inline u32 pwr_pmu_mutex_r(u32 i) +{ + return 0x0010a580U + i*4U; +} +static inline u32 pwr_pmu_mutex__size_1_v(void) +{ + return 0x00000010U; +} +static inline u32 pwr_pmu_mutex_value_f(u32 v) +{ + return (v & 0xffU) << 0U; +} +static inline u32 pwr_pmu_mutex_value_v(u32 r) +{ + return (r >> 0U) & 0xffU; +} +static inline u32 pwr_pmu_mutex_value_initial_lock_f(void) +{ + return 0x0U; +} +static inline u32 pwr_pmu_queue_head_r(u32 i) +{ + return 0x0010a800U + i*4U; +} +static inline u32 pwr_pmu_queue_head__size_1_v(void) +{ + return 0x00000008U; +} +static inline u32 pwr_pmu_queue_head_address_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 pwr_pmu_queue_head_address_v(u32 r) +{ + return (r >> 0U) & 0xffffffffU; +} +static inline u32 pwr_pmu_queue_tail_r(u32 i) +{ + return 0x0010a820U + i*4U; +} +static inline u32 pwr_pmu_queue_tail__size_1_v(void) +{ + return 0x00000008U; +} +static inline u32 pwr_pmu_queue_tail_address_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 pwr_pmu_queue_tail_address_v(u32 r) +{ + return (r >> 0U) & 0xffffffffU; +} +static inline u32 pwr_pmu_msgq_head_r(void) +{ + return 0x0010a4c8U; +} +static inline u32 pwr_pmu_msgq_head_val_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 pwr_pmu_msgq_head_val_v(u32 r) +{ + return (r >> 0U) & 0xffffffffU; +} +static inline u32 pwr_pmu_msgq_tail_r(void) +{ + return 0x0010a4ccU; +} +static inline u32 pwr_pmu_msgq_tail_val_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 pwr_pmu_msgq_tail_val_v(u32 r) +{ + return (r >> 0U) & 0xffffffffU; +} +static inline u32 pwr_pmu_idle_mask_r(u32 i) +{ + return 0x0010a504U + i*16U; +} +static inline u32 pwr_pmu_idle_mask_gr_enabled_f(void) +{ + return 0x1U; +} +static inline u32 pwr_pmu_idle_mask_ce_2_enabled_f(void) +{ 
+ return 0x200000U; +} +static inline u32 pwr_pmu_idle_count_r(u32 i) +{ + return 0x0010a508U + i*16U; +} +static inline u32 pwr_pmu_idle_count_value_f(u32 v) +{ + return (v & 0x7fffffffU) << 0U; +} +static inline u32 pwr_pmu_idle_count_value_v(u32 r) +{ + return (r >> 0U) & 0x7fffffffU; +} +static inline u32 pwr_pmu_idle_count_reset_f(u32 v) +{ + return (v & 0x1U) << 31U; +} +static inline u32 pwr_pmu_idle_ctrl_r(u32 i) +{ + return 0x0010a50cU + i*16U; +} +static inline u32 pwr_pmu_idle_ctrl_value_m(void) +{ + return 0x3U << 0U; +} +static inline u32 pwr_pmu_idle_ctrl_value_busy_f(void) +{ + return 0x2U; +} +static inline u32 pwr_pmu_idle_ctrl_value_always_f(void) +{ + return 0x3U; +} +static inline u32 pwr_pmu_idle_ctrl_filter_m(void) +{ + return 0x1U << 2U; +} +static inline u32 pwr_pmu_idle_ctrl_filter_disabled_f(void) +{ + return 0x0U; +} +static inline u32 pwr_pmu_idle_mask_supp_r(u32 i) +{ + return 0x0010a9f0U + i*8U; +} +static inline u32 pwr_pmu_idle_mask_1_supp_r(u32 i) +{ + return 0x0010a9f4U + i*8U; +} +static inline u32 pwr_pmu_idle_ctrl_supp_r(u32 i) +{ + return 0x0010aa30U + i*8U; +} +static inline u32 pwr_pmu_debug_r(u32 i) +{ + return 0x0010a5c0U + i*4U; +} +static inline u32 pwr_pmu_debug__size_1_v(void) +{ + return 0x00000004U; +} +static inline u32 pwr_pmu_mailbox_r(u32 i) +{ + return 0x0010a450U + i*4U; +} +static inline u32 pwr_pmu_mailbox__size_1_v(void) +{ + return 0x0000000cU; +} +static inline u32 pwr_pmu_bar0_addr_r(void) +{ + return 0x0010a7a0U; +} +static inline u32 pwr_pmu_bar0_data_r(void) +{ + return 0x0010a7a4U; +} +static inline u32 pwr_pmu_bar0_ctl_r(void) +{ + return 0x0010a7acU; +} +static inline u32 pwr_pmu_bar0_timeout_r(void) +{ + return 0x0010a7a8U; +} +static inline u32 pwr_pmu_bar0_fecs_error_r(void) +{ + return 0x0010a988U; +} +static inline u32 pwr_pmu_bar0_error_status_r(void) +{ + return 0x0010a7b0U; +} +static inline u32 pwr_pmu_pg_idlefilth_r(u32 i) +{ + return 0x0010a6c0U + i*4U; +} +static inline u32 
pwr_pmu_pg_ppuidlefilth_r(u32 i) +{ + return 0x0010a6e8U + i*4U; +} +static inline u32 pwr_pmu_pg_idle_cnt_r(u32 i) +{ + return 0x0010a710U + i*4U; +} +static inline u32 pwr_pmu_pg_intren_r(u32 i) +{ + return 0x0010a760U + i*4U; +} +static inline u32 pwr_fbif_transcfg_r(u32 i) +{ + return 0x0010ae00U + i*4U; +} +static inline u32 pwr_fbif_transcfg_target_local_fb_f(void) +{ + return 0x0U; +} +static inline u32 pwr_fbif_transcfg_target_coherent_sysmem_f(void) +{ + return 0x1U; +} +static inline u32 pwr_fbif_transcfg_target_noncoherent_sysmem_f(void) +{ + return 0x2U; +} +static inline u32 pwr_fbif_transcfg_mem_type_s(void) +{ + return 1U; +} +static inline u32 pwr_fbif_transcfg_mem_type_f(u32 v) +{ + return (v & 0x1U) << 2U; +} +static inline u32 pwr_fbif_transcfg_mem_type_m(void) +{ + return 0x1U << 2U; +} +static inline u32 pwr_fbif_transcfg_mem_type_v(u32 r) +{ + return (r >> 2U) & 0x1U; +} +static inline u32 pwr_fbif_transcfg_mem_type_virtual_f(void) +{ + return 0x0U; +} +static inline u32 pwr_fbif_transcfg_mem_type_physical_f(void) +{ + return 0x4U; +} +#endif diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_ram_gv11b.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_ram_gv11b.h new file mode 100644 index 000000000..1191e5808 --- /dev/null +++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_ram_gv11b.h @@ -0,0 +1,775 @@ +/* + * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/* + * Function naming determines intended use: + * + * _r(void) : Returns the offset for register . + * + * _o(void) : Returns the offset for element . + * + * _w(void) : Returns the word offset for word (4 byte) element . + * + * __s(void) : Returns size of field of register in bits. + * + * __f(u32 v) : Returns a value based on 'v' which has been shifted + * and masked to place it at field of register . This value + * can be |'d with others to produce a full register value for + * register . + * + * __m(void) : Returns a mask for field of register . This + * value can be ~'d and then &'d to clear the value of field for + * register . + * + * ___f(void) : Returns the constant value after being shifted + * to place it at field of register . This value can be |'d + * with others to produce a full register value for . 
+ * + * __v(u32 r) : Returns the value of field from a full register + * value 'r' after being shifted to place its LSB at bit 0. + * This value is suitable for direct comparison with other unshifted + * values appropriate for use in field of register . + * + * ___v(void) : Returns the constant value for defined for + * field of register . This value is suitable for direct + * comparison with unshifted values appropriate for use in field + * of register . + */ +#ifndef _hw_ram_gv11b_h_ +#define _hw_ram_gv11b_h_ + +static inline u32 ram_in_ramfc_s(void) +{ + return 4096U; +} +static inline u32 ram_in_ramfc_w(void) +{ + return 0U; +} +static inline u32 ram_in_page_dir_base_target_f(u32 v) +{ + return (v & 0x3U) << 0U; +} +static inline u32 ram_in_page_dir_base_target_w(void) +{ + return 128U; +} +static inline u32 ram_in_page_dir_base_target_vid_mem_f(void) +{ + return 0x0U; +} +static inline u32 ram_in_page_dir_base_target_sys_mem_coh_f(void) +{ + return 0x2U; +} +static inline u32 ram_in_page_dir_base_target_sys_mem_ncoh_f(void) +{ + return 0x3U; +} +static inline u32 ram_in_page_dir_base_vol_w(void) +{ + return 128U; +} +static inline u32 ram_in_page_dir_base_vol_true_f(void) +{ + return 0x4U; +} +static inline u32 ram_in_page_dir_base_vol_false_f(void) +{ + return 0x0U; +} +static inline u32 ram_in_page_dir_base_fault_replay_tex_f(u32 v) +{ + return (v & 0x1U) << 4U; +} +static inline u32 ram_in_page_dir_base_fault_replay_tex_m(void) +{ + return 0x1U << 4U; +} +static inline u32 ram_in_page_dir_base_fault_replay_tex_w(void) +{ + return 128U; +} +static inline u32 ram_in_page_dir_base_fault_replay_tex_true_f(void) +{ + return 0x10U; +} +static inline u32 ram_in_page_dir_base_fault_replay_gcc_f(u32 v) +{ + return (v & 0x1U) << 5U; +} +static inline u32 ram_in_page_dir_base_fault_replay_gcc_m(void) +{ + return 0x1U << 5U; +} +static inline u32 ram_in_page_dir_base_fault_replay_gcc_w(void) +{ + return 128U; +} +static inline u32 
ram_in_page_dir_base_fault_replay_gcc_true_f(void) +{ + return 0x20U; +} +static inline u32 ram_in_big_page_size_f(u32 v) +{ + return (v & 0x1U) << 11U; +} +static inline u32 ram_in_big_page_size_m(void) +{ + return 0x1U << 11U; +} +static inline u32 ram_in_big_page_size_w(void) +{ + return 128U; +} +static inline u32 ram_in_big_page_size_128kb_f(void) +{ + return 0x0U; +} +static inline u32 ram_in_big_page_size_64kb_f(void) +{ + return 0x800U; +} +static inline u32 ram_in_page_dir_base_lo_f(u32 v) +{ + return (v & 0xfffffU) << 12U; +} +static inline u32 ram_in_page_dir_base_lo_w(void) +{ + return 128U; +} +static inline u32 ram_in_page_dir_base_hi_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 ram_in_page_dir_base_hi_w(void) +{ + return 129U; +} +static inline u32 ram_in_engine_cs_w(void) +{ + return 132U; +} +static inline u32 ram_in_engine_cs_wfi_v(void) +{ + return 0x00000000U; +} +static inline u32 ram_in_engine_cs_wfi_f(void) +{ + return 0x0U; +} +static inline u32 ram_in_engine_cs_fg_v(void) +{ + return 0x00000001U; +} +static inline u32 ram_in_engine_cs_fg_f(void) +{ + return 0x8U; +} +static inline u32 ram_in_engine_wfi_mode_f(u32 v) +{ + return (v & 0x1U) << 2U; +} +static inline u32 ram_in_engine_wfi_mode_w(void) +{ + return 132U; +} +static inline u32 ram_in_engine_wfi_mode_physical_v(void) +{ + return 0x00000000U; +} +static inline u32 ram_in_engine_wfi_mode_virtual_v(void) +{ + return 0x00000001U; +} +static inline u32 ram_in_engine_wfi_target_f(u32 v) +{ + return (v & 0x3U) << 0U; +} +static inline u32 ram_in_engine_wfi_target_w(void) +{ + return 132U; +} +static inline u32 ram_in_engine_wfi_target_sys_mem_coh_v(void) +{ + return 0x00000002U; +} +static inline u32 ram_in_engine_wfi_target_sys_mem_ncoh_v(void) +{ + return 0x00000003U; +} +static inline u32 ram_in_engine_wfi_target_local_mem_v(void) +{ + return 0x00000000U; +} +static inline u32 ram_in_engine_wfi_ptr_lo_f(u32 v) +{ + return (v & 0xfffffU) << 12U; +} +static inline 
u32 ram_in_engine_wfi_ptr_lo_w(void) +{ + return 132U; +} +static inline u32 ram_in_engine_wfi_ptr_hi_f(u32 v) +{ + return (v & 0xffU) << 0U; +} +static inline u32 ram_in_engine_wfi_ptr_hi_w(void) +{ + return 133U; +} +static inline u32 ram_in_engine_wfi_veid_f(u32 v) +{ + return (v & 0x3fU) << 0U; +} +static inline u32 ram_in_engine_wfi_veid_w(void) +{ + return 134U; +} +static inline u32 ram_in_eng_method_buffer_addr_lo_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 ram_in_eng_method_buffer_addr_lo_w(void) +{ + return 136U; +} +static inline u32 ram_in_eng_method_buffer_addr_hi_f(u32 v) +{ + return (v & 0x1ffffU) << 0U; +} +static inline u32 ram_in_eng_method_buffer_addr_hi_w(void) +{ + return 137U; +} +static inline u32 ram_in_sc_page_dir_base_target_f(u32 v, u32 i) +{ + return (v & 0x3U) << (0U + i*0U); +} +static inline u32 ram_in_sc_page_dir_base_target__size_1_v(void) +{ + return 0x00000040U; +} +static inline u32 ram_in_sc_page_dir_base_target_vid_mem_v(void) +{ + return 0x00000000U; +} +static inline u32 ram_in_sc_page_dir_base_target_invalid_v(void) +{ + return 0x00000001U; +} +static inline u32 ram_in_sc_page_dir_base_target_sys_mem_coh_v(void) +{ + return 0x00000002U; +} +static inline u32 ram_in_sc_page_dir_base_target_sys_mem_ncoh_v(void) +{ + return 0x00000003U; +} +static inline u32 ram_in_sc_page_dir_base_vol_f(u32 v, u32 i) +{ + return (v & 0x1U) << (2U + i*0U); +} +static inline u32 ram_in_sc_page_dir_base_vol__size_1_v(void) +{ + return 0x00000040U; +} +static inline u32 ram_in_sc_page_dir_base_vol_true_v(void) +{ + return 0x00000001U; +} +static inline u32 ram_in_sc_page_dir_base_vol_false_v(void) +{ + return 0x00000000U; +} +static inline u32 ram_in_sc_page_dir_base_fault_replay_tex_f(u32 v, u32 i) +{ + return (v & 0x1U) << (4U + i*0U); +} +static inline u32 ram_in_sc_page_dir_base_fault_replay_tex__size_1_v(void) +{ + return 0x00000040U; +} +static inline u32 ram_in_sc_page_dir_base_fault_replay_tex_enabled_v(void) +{ + 
return 0x00000001U; +} +static inline u32 ram_in_sc_page_dir_base_fault_replay_tex_disabled_v(void) +{ + return 0x00000000U; +} +static inline u32 ram_in_sc_page_dir_base_fault_replay_gcc_f(u32 v, u32 i) +{ + return (v & 0x1U) << (5U + i*0U); +} +static inline u32 ram_in_sc_page_dir_base_fault_replay_gcc__size_1_v(void) +{ + return 0x00000040U; +} +static inline u32 ram_in_sc_page_dir_base_fault_replay_gcc_enabled_v(void) +{ + return 0x00000001U; +} +static inline u32 ram_in_sc_page_dir_base_fault_replay_gcc_disabled_v(void) +{ + return 0x00000000U; +} +static inline u32 ram_in_sc_use_ver2_pt_format_f(u32 v, u32 i) +{ + return (v & 0x1U) << (10U + i*0U); +} +static inline u32 ram_in_sc_use_ver2_pt_format__size_1_v(void) +{ + return 0x00000040U; +} +static inline u32 ram_in_sc_use_ver2_pt_format_false_v(void) +{ + return 0x00000000U; +} +static inline u32 ram_in_sc_use_ver2_pt_format_true_v(void) +{ + return 0x00000001U; +} +static inline u32 ram_in_sc_big_page_size_f(u32 v, u32 i) +{ + return (v & 0x1U) << (11U + i*0U); +} +static inline u32 ram_in_sc_big_page_size__size_1_v(void) +{ + return 0x00000040U; +} +static inline u32 ram_in_sc_big_page_size_64kb_v(void) +{ + return 0x00000001U; +} +static inline u32 ram_in_sc_page_dir_base_lo_f(u32 v, u32 i) +{ + return (v & 0xfffffU) << (12U + i*0U); +} +static inline u32 ram_in_sc_page_dir_base_lo__size_1_v(void) +{ + return 0x00000040U; +} +static inline u32 ram_in_sc_page_dir_base_hi_f(u32 v, u32 i) +{ + return (v & 0xffffffffU) << (0U + i*0U); +} +static inline u32 ram_in_sc_page_dir_base_hi__size_1_v(void) +{ + return 0x00000040U; +} +static inline u32 ram_in_sc_page_dir_base_target_0_f(u32 v) +{ + return (v & 0x3U) << 0U; +} +static inline u32 ram_in_sc_page_dir_base_target_0_w(void) +{ + return 168U; +} +static inline u32 ram_in_sc_page_dir_base_vol_0_f(u32 v) +{ + return (v & 0x1U) << 2U; +} +static inline u32 ram_in_sc_page_dir_base_vol_0_w(void) +{ + return 168U; +} +static inline u32 
ram_in_sc_page_dir_base_fault_replay_tex_0_f(u32 v) +{ + return (v & 0x1U) << 4U; +} +static inline u32 ram_in_sc_page_dir_base_fault_replay_tex_0_w(void) +{ + return 168U; +} +static inline u32 ram_in_sc_page_dir_base_fault_replay_gcc_0_f(u32 v) +{ + return (v & 0x1U) << 5U; +} +static inline u32 ram_in_sc_page_dir_base_fault_replay_gcc_0_w(void) +{ + return 168U; +} +static inline u32 ram_in_sc_use_ver2_pt_format_0_f(u32 v) +{ + return (v & 0x1U) << 10U; +} +static inline u32 ram_in_sc_use_ver2_pt_format_0_w(void) +{ + return 168U; +} +static inline u32 ram_in_sc_big_page_size_0_f(u32 v) +{ + return (v & 0x1U) << 11U; +} +static inline u32 ram_in_sc_big_page_size_0_w(void) +{ + return 168U; +} +static inline u32 ram_in_sc_page_dir_base_lo_0_f(u32 v) +{ + return (v & 0xfffffU) << 12U; +} +static inline u32 ram_in_sc_page_dir_base_lo_0_w(void) +{ + return 168U; +} +static inline u32 ram_in_sc_page_dir_base_hi_0_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 ram_in_sc_page_dir_base_hi_0_w(void) +{ + return 169U; +} +static inline u32 ram_in_base_shift_v(void) +{ + return 0x0000000cU; +} +static inline u32 ram_in_alloc_size_v(void) +{ + return 0x00001000U; +} +static inline u32 ram_fc_size_val_v(void) +{ + return 0x00000200U; +} +static inline u32 ram_fc_gp_put_w(void) +{ + return 0U; +} +static inline u32 ram_fc_userd_w(void) +{ + return 2U; +} +static inline u32 ram_fc_userd_hi_w(void) +{ + return 3U; +} +static inline u32 ram_fc_signature_w(void) +{ + return 4U; +} +static inline u32 ram_fc_gp_get_w(void) +{ + return 5U; +} +static inline u32 ram_fc_pb_get_w(void) +{ + return 6U; +} +static inline u32 ram_fc_pb_get_hi_w(void) +{ + return 7U; +} +static inline u32 ram_fc_pb_top_level_get_w(void) +{ + return 8U; +} +static inline u32 ram_fc_pb_top_level_get_hi_w(void) +{ + return 9U; +} +static inline u32 ram_fc_acquire_w(void) +{ + return 12U; +} +static inline u32 ram_fc_sem_addr_hi_w(void) +{ + return 14U; +} +static inline u32 
ram_fc_sem_addr_lo_w(void) +{ + return 15U; +} +static inline u32 ram_fc_sem_payload_lo_w(void) +{ + return 16U; +} +static inline u32 ram_fc_sem_payload_hi_w(void) +{ + return 39U; +} +static inline u32 ram_fc_sem_execute_w(void) +{ + return 17U; +} +static inline u32 ram_fc_gp_base_w(void) +{ + return 18U; +} +static inline u32 ram_fc_gp_base_hi_w(void) +{ + return 19U; +} +static inline u32 ram_fc_gp_fetch_w(void) +{ + return 20U; +} +static inline u32 ram_fc_pb_fetch_w(void) +{ + return 21U; +} +static inline u32 ram_fc_pb_fetch_hi_w(void) +{ + return 22U; +} +static inline u32 ram_fc_pb_put_w(void) +{ + return 23U; +} +static inline u32 ram_fc_pb_put_hi_w(void) +{ + return 24U; +} +static inline u32 ram_fc_pb_header_w(void) +{ + return 33U; +} +static inline u32 ram_fc_pb_count_w(void) +{ + return 34U; +} +static inline u32 ram_fc_subdevice_w(void) +{ + return 37U; +} +static inline u32 ram_fc_target_w(void) +{ + return 43U; +} +static inline u32 ram_fc_hce_ctrl_w(void) +{ + return 57U; +} +static inline u32 ram_fc_chid_w(void) +{ + return 58U; +} +static inline u32 ram_fc_chid_id_f(u32 v) +{ + return (v & 0xfffU) << 0U; +} +static inline u32 ram_fc_chid_id_w(void) +{ + return 0U; +} +static inline u32 ram_fc_config_w(void) +{ + return 61U; +} +static inline u32 ram_fc_runlist_timeslice_w(void) +{ + return 62U; +} +static inline u32 ram_fc_set_channel_info_w(void) +{ + return 63U; +} +static inline u32 ram_userd_base_shift_v(void) +{ + return 0x00000009U; +} +static inline u32 ram_userd_chan_size_v(void) +{ + return 0x00000200U; +} +static inline u32 ram_userd_put_w(void) +{ + return 16U; +} +static inline u32 ram_userd_get_w(void) +{ + return 17U; +} +static inline u32 ram_userd_ref_w(void) +{ + return 18U; +} +static inline u32 ram_userd_put_hi_w(void) +{ + return 19U; +} +static inline u32 ram_userd_ref_threshold_w(void) +{ + return 20U; +} +static inline u32 ram_userd_top_level_get_w(void) +{ + return 22U; +} +static inline u32 
ram_userd_top_level_get_hi_w(void) +{ + return 23U; +} +static inline u32 ram_userd_get_hi_w(void) +{ + return 24U; +} +static inline u32 ram_userd_gp_get_w(void) +{ + return 34U; +} +static inline u32 ram_userd_gp_put_w(void) +{ + return 35U; +} +static inline u32 ram_userd_gp_top_level_get_w(void) +{ + return 22U; +} +static inline u32 ram_userd_gp_top_level_get_hi_w(void) +{ + return 23U; +} +static inline u32 ram_rl_entry_size_v(void) +{ + return 0x00000010U; +} +static inline u32 ram_rl_entry_type_f(u32 v) +{ + return (v & 0x1U) << 0U; +} +static inline u32 ram_rl_entry_type_channel_v(void) +{ + return 0x00000000U; +} +static inline u32 ram_rl_entry_type_tsg_v(void) +{ + return 0x00000001U; +} +static inline u32 ram_rl_entry_id_f(u32 v) +{ + return (v & 0xfffU) << 0U; +} +static inline u32 ram_rl_entry_chan_runqueue_selector_f(u32 v) +{ + return (v & 0x1U) << 1U; +} +static inline u32 ram_rl_entry_chan_inst_target_f(u32 v) +{ + return (v & 0x3U) << 4U; +} +static inline u32 ram_rl_entry_chan_inst_target_sys_mem_ncoh_v(void) +{ + return 0x00000003U; +} +static inline u32 ram_rl_entry_chan_inst_target_sys_mem_coh_v(void) +{ + return 0x00000002U; +} +static inline u32 ram_rl_entry_chan_inst_target_vid_mem_v(void) +{ + return 0x00000000U; +} +static inline u32 ram_rl_entry_chan_userd_target_f(u32 v) +{ + return (v & 0x3U) << 6U; +} +static inline u32 ram_rl_entry_chan_userd_target_vid_mem_v(void) +{ + return 0x00000000U; +} +static inline u32 ram_rl_entry_chan_userd_target_vid_mem_nvlink_coh_v(void) +{ + return 0x00000001U; +} +static inline u32 ram_rl_entry_chan_userd_target_sys_mem_coh_v(void) +{ + return 0x00000002U; +} +static inline u32 ram_rl_entry_chan_userd_target_sys_mem_ncoh_v(void) +{ + return 0x00000003U; +} +static inline u32 ram_rl_entry_chan_userd_ptr_lo_f(u32 v) +{ + return (v & 0xffffffU) << 8U; +} +static inline u32 ram_rl_entry_chan_userd_ptr_hi_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 ram_rl_entry_chid_f(u32 v) +{ + 
return (v & 0xfffU) << 0U; +} +static inline u32 ram_rl_entry_chan_inst_ptr_lo_f(u32 v) +{ + return (v & 0xfffffU) << 12U; +} +static inline u32 ram_rl_entry_chan_inst_ptr_hi_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +static inline u32 ram_rl_entry_tsg_timeslice_scale_f(u32 v) +{ + return (v & 0xfU) << 16U; +} +static inline u32 ram_rl_entry_tsg_timeslice_scale_3_v(void) +{ + return 0x00000003U; +} +static inline u32 ram_rl_entry_tsg_timeslice_timeout_f(u32 v) +{ + return (v & 0xffU) << 24U; +} +static inline u32 ram_rl_entry_tsg_timeslice_timeout_128_v(void) +{ + return 0x00000080U; +} +static inline u32 ram_rl_entry_tsg_timeslice_timeout_disable_v(void) +{ + return 0x00000000U; +} +static inline u32 ram_rl_entry_tsg_length_f(u32 v) +{ + return (v & 0xffU) << 0U; +} +static inline u32 ram_rl_entry_tsg_length_init_v(void) +{ + return 0x00000000U; +} +static inline u32 ram_rl_entry_tsg_length_min_v(void) +{ + return 0x00000001U; +} +static inline u32 ram_rl_entry_tsg_length_max_v(void) +{ + return 0x00000080U; +} +static inline u32 ram_rl_entry_tsg_tsgid_f(u32 v) +{ + return (v & 0xfffU) << 0U; +} +static inline u32 ram_rl_entry_chan_userd_ptr_align_shift_v(void) +{ + return 0x00000008U; +} +static inline u32 ram_rl_entry_chan_userd_align_shift_v(void) +{ + return 0x00000008U; +} +static inline u32 ram_rl_entry_chan_inst_ptr_align_shift_v(void) +{ + return 0x0000000cU; +} +#endif diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_therm_gv11b.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_therm_gv11b.h new file mode 100644 index 000000000..8f8981e8d --- /dev/null +++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_therm_gv11b.h @@ -0,0 +1,435 @@ +/* + * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/* + * Function naming determines intended use: + * + * _r(void) : Returns the offset for register . + * + * _o(void) : Returns the offset for element . + * + * _w(void) : Returns the word offset for word (4 byte) element . + * + * __s(void) : Returns size of field of register in bits. + * + * __f(u32 v) : Returns a value based on 'v' which has been shifted + * and masked to place it at field of register . This value + * can be |'d with others to produce a full register value for + * register . + * + * __m(void) : Returns a mask for field of register . This + * value can be ~'d and then &'d to clear the value of field for + * register . + * + * ___f(void) : Returns the constant value after being shifted + * to place it at field of register . This value can be |'d + * with others to produce a full register value for . 
+ * + * __v(u32 r) : Returns the value of field from a full register + * value 'r' after being shifted to place its LSB at bit 0. + * This value is suitable for direct comparison with other unshifted + * values appropriate for use in field of register . + * + * ___v(void) : Returns the constant value for defined for + * field of register . This value is suitable for direct + * comparison with unshifted values appropriate for use in field + * of register . + */ +#ifndef _hw_therm_gv11b_h_ +#define _hw_therm_gv11b_h_ + +static inline u32 therm_use_a_r(void) +{ + return 0x00020798U; +} +static inline u32 therm_use_a_ext_therm_0_enable_f(void) +{ + return 0x1U; +} +static inline u32 therm_use_a_ext_therm_1_enable_f(void) +{ + return 0x2U; +} +static inline u32 therm_use_a_ext_therm_2_enable_f(void) +{ + return 0x4U; +} +static inline u32 therm_evt_ext_therm_0_r(void) +{ + return 0x00020700U; +} +static inline u32 therm_evt_ext_therm_0_slow_factor_f(u32 v) +{ + return (v & 0x3fU) << 24U; +} +static inline u32 therm_evt_ext_therm_0_slow_factor_init_v(void) +{ + return 0x00000001U; +} +static inline u32 therm_evt_ext_therm_0_mode_f(u32 v) +{ + return (v & 0x3U) << 30U; +} +static inline u32 therm_evt_ext_therm_0_mode_normal_v(void) +{ + return 0x00000000U; +} +static inline u32 therm_evt_ext_therm_0_mode_inverted_v(void) +{ + return 0x00000001U; +} +static inline u32 therm_evt_ext_therm_0_mode_forced_v(void) +{ + return 0x00000002U; +} +static inline u32 therm_evt_ext_therm_0_mode_cleared_v(void) +{ + return 0x00000003U; +} +static inline u32 therm_evt_ext_therm_1_r(void) +{ + return 0x00020704U; +} +static inline u32 therm_evt_ext_therm_1_slow_factor_f(u32 v) +{ + return (v & 0x3fU) << 24U; +} +static inline u32 therm_evt_ext_therm_1_slow_factor_init_v(void) +{ + return 0x00000002U; +} +static inline u32 therm_evt_ext_therm_1_mode_f(u32 v) +{ + return (v & 0x3U) << 30U; +} +static inline u32 therm_evt_ext_therm_1_mode_normal_v(void) +{ + return 0x00000000U; +} +static 
inline u32 therm_evt_ext_therm_1_mode_inverted_v(void) +{ + return 0x00000001U; +} +static inline u32 therm_evt_ext_therm_1_mode_forced_v(void) +{ + return 0x00000002U; +} +static inline u32 therm_evt_ext_therm_1_mode_cleared_v(void) +{ + return 0x00000003U; +} +static inline u32 therm_evt_ext_therm_2_r(void) +{ + return 0x00020708U; +} +static inline u32 therm_evt_ext_therm_2_slow_factor_f(u32 v) +{ + return (v & 0x3fU) << 24U; +} +static inline u32 therm_evt_ext_therm_2_slow_factor_init_v(void) +{ + return 0x00000003U; +} +static inline u32 therm_evt_ext_therm_2_mode_f(u32 v) +{ + return (v & 0x3U) << 30U; +} +static inline u32 therm_evt_ext_therm_2_mode_normal_v(void) +{ + return 0x00000000U; +} +static inline u32 therm_evt_ext_therm_2_mode_inverted_v(void) +{ + return 0x00000001U; +} +static inline u32 therm_evt_ext_therm_2_mode_forced_v(void) +{ + return 0x00000002U; +} +static inline u32 therm_evt_ext_therm_2_mode_cleared_v(void) +{ + return 0x00000003U; +} +static inline u32 therm_weight_1_r(void) +{ + return 0x00020024U; +} +static inline u32 therm_config1_r(void) +{ + return 0x00020050U; +} +static inline u32 therm_config2_r(void) +{ + return 0x00020130U; +} +static inline u32 therm_config2_slowdown_factor_extended_f(u32 v) +{ + return (v & 0x1U) << 24U; +} +static inline u32 therm_config2_grad_enable_f(u32 v) +{ + return (v & 0x1U) << 31U; +} +static inline u32 therm_gate_ctrl_r(u32 i) +{ + return 0x00020200U + i*4U; +} +static inline u32 therm_gate_ctrl_eng_clk_m(void) +{ + return 0x3U << 0U; +} +static inline u32 therm_gate_ctrl_eng_clk_run_f(void) +{ + return 0x0U; +} +static inline u32 therm_gate_ctrl_eng_clk_auto_f(void) +{ + return 0x1U; +} +static inline u32 therm_gate_ctrl_eng_clk_stop_f(void) +{ + return 0x2U; +} +static inline u32 therm_gate_ctrl_blk_clk_m(void) +{ + return 0x3U << 2U; +} +static inline u32 therm_gate_ctrl_blk_clk_run_f(void) +{ + return 0x0U; +} +static inline u32 therm_gate_ctrl_blk_clk_auto_f(void) +{ + return 0x4U; +} 
+static inline u32 therm_gate_ctrl_idle_holdoff_m(void) +{ + return 0x1U << 4U; +} +static inline u32 therm_gate_ctrl_idle_holdoff_off_f(void) +{ + return 0x0U; +} +static inline u32 therm_gate_ctrl_idle_holdoff_on_f(void) +{ + return 0x10U; +} +static inline u32 therm_gate_ctrl_eng_idle_filt_exp_f(u32 v) +{ + return (v & 0x1fU) << 8U; +} +static inline u32 therm_gate_ctrl_eng_idle_filt_exp_m(void) +{ + return 0x1fU << 8U; +} +static inline u32 therm_gate_ctrl_eng_idle_filt_exp__prod_f(void) +{ + return 0x200U; +} +static inline u32 therm_gate_ctrl_eng_idle_filt_mant_f(u32 v) +{ + return (v & 0x7U) << 13U; +} +static inline u32 therm_gate_ctrl_eng_idle_filt_mant_m(void) +{ + return 0x7U << 13U; +} +static inline u32 therm_gate_ctrl_eng_idle_filt_mant__prod_f(void) +{ + return 0x2000U; +} +static inline u32 therm_gate_ctrl_eng_delay_before_f(u32 v) +{ + return (v & 0xfU) << 16U; +} +static inline u32 therm_gate_ctrl_eng_delay_before_m(void) +{ + return 0xfU << 16U; +} +static inline u32 therm_gate_ctrl_eng_delay_before__prod_f(void) +{ + return 0x40000U; +} +static inline u32 therm_gate_ctrl_eng_delay_after_f(u32 v) +{ + return (v & 0xfU) << 20U; +} +static inline u32 therm_gate_ctrl_eng_delay_after_m(void) +{ + return 0xfU << 20U; +} +static inline u32 therm_gate_ctrl_eng_delay_after__prod_f(void) +{ + return 0x0U; +} +static inline u32 therm_fecs_idle_filter_r(void) +{ + return 0x00020288U; +} +static inline u32 therm_fecs_idle_filter_value_m(void) +{ + return 0xffffffffU << 0U; +} +static inline u32 therm_fecs_idle_filter_value__prod_f(void) +{ + return 0x0U; +} +static inline u32 therm_hubmmu_idle_filter_r(void) +{ + return 0x0002028cU; +} +static inline u32 therm_hubmmu_idle_filter_value_m(void) +{ + return 0xffffffffU << 0U; +} +static inline u32 therm_hubmmu_idle_filter_value__prod_f(void) +{ + return 0x0U; +} +static inline u32 therm_clk_slowdown_r(u32 i) +{ + return 0x00020160U + i*4U; +} +static inline u32 therm_clk_slowdown_idle_factor_f(u32 v) +{ + 
return (v & 0x3fU) << 16U; +} +static inline u32 therm_clk_slowdown_idle_factor_m(void) +{ + return 0x3fU << 16U; +} +static inline u32 therm_clk_slowdown_idle_factor_v(u32 r) +{ + return (r >> 16U) & 0x3fU; +} +static inline u32 therm_clk_slowdown_idle_factor_disabled_f(void) +{ + return 0x0U; +} +static inline u32 therm_grad_stepping_table_r(u32 i) +{ + return 0x000202c8U + i*4U; +} +static inline u32 therm_grad_stepping_table_slowdown_factor0_f(u32 v) +{ + return (v & 0x3fU) << 0U; +} +static inline u32 therm_grad_stepping_table_slowdown_factor0_m(void) +{ + return 0x3fU << 0U; +} +static inline u32 therm_grad_stepping_table_slowdown_factor0_fpdiv_by1p5_f(void) +{ + return 0x1U; +} +static inline u32 therm_grad_stepping_table_slowdown_factor0_fpdiv_by2_f(void) +{ + return 0x2U; +} +static inline u32 therm_grad_stepping_table_slowdown_factor0_fpdiv_by4_f(void) +{ + return 0x6U; +} +static inline u32 therm_grad_stepping_table_slowdown_factor0_fpdiv_by8_f(void) +{ + return 0xeU; +} +static inline u32 therm_grad_stepping_table_slowdown_factor1_f(u32 v) +{ + return (v & 0x3fU) << 6U; +} +static inline u32 therm_grad_stepping_table_slowdown_factor1_m(void) +{ + return 0x3fU << 6U; +} +static inline u32 therm_grad_stepping_table_slowdown_factor2_f(u32 v) +{ + return (v & 0x3fU) << 12U; +} +static inline u32 therm_grad_stepping_table_slowdown_factor2_m(void) +{ + return 0x3fU << 12U; +} +static inline u32 therm_grad_stepping_table_slowdown_factor3_f(u32 v) +{ + return (v & 0x3fU) << 18U; +} +static inline u32 therm_grad_stepping_table_slowdown_factor3_m(void) +{ + return 0x3fU << 18U; +} +static inline u32 therm_grad_stepping_table_slowdown_factor4_f(u32 v) +{ + return (v & 0x3fU) << 24U; +} +static inline u32 therm_grad_stepping_table_slowdown_factor4_m(void) +{ + return 0x3fU << 24U; +} +static inline u32 therm_grad_stepping0_r(void) +{ + return 0x000202c0U; +} +static inline u32 therm_grad_stepping0_feature_s(void) +{ + return 1U; +} +static inline u32 
therm_grad_stepping0_feature_f(u32 v) +{ + return (v & 0x1U) << 0U; +} +static inline u32 therm_grad_stepping0_feature_m(void) +{ + return 0x1U << 0U; +} +static inline u32 therm_grad_stepping0_feature_v(u32 r) +{ + return (r >> 0U) & 0x1U; +} +static inline u32 therm_grad_stepping0_feature_enable_f(void) +{ + return 0x1U; +} +static inline u32 therm_grad_stepping1_r(void) +{ + return 0x000202c4U; +} +static inline u32 therm_grad_stepping1_pdiv_duration_f(u32 v) +{ + return (v & 0x1ffffU) << 0U; +} +static inline u32 therm_clk_timing_r(u32 i) +{ + return 0x000203c0U + i*4U; +} +static inline u32 therm_clk_timing_grad_slowdown_f(u32 v) +{ + return (v & 0x1U) << 16U; +} +static inline u32 therm_clk_timing_grad_slowdown_m(void) +{ + return 0x1U << 16U; +} +static inline u32 therm_clk_timing_grad_slowdown_enabled_f(void) +{ + return 0x10000U; +} +#endif diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_timer_gv11b.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_timer_gv11b.h new file mode 100644 index 000000000..61440213b --- /dev/null +++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_timer_gv11b.h @@ -0,0 +1,115 @@ +/* + * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/* + * Function naming determines intended use: + * + * _r(void) : Returns the offset for register . + * + * _o(void) : Returns the offset for element . + * + * _w(void) : Returns the word offset for word (4 byte) element . + * + * __s(void) : Returns size of field of register in bits. + * + * __f(u32 v) : Returns a value based on 'v' which has been shifted + * and masked to place it at field of register . This value + * can be |'d with others to produce a full register value for + * register . + * + * __m(void) : Returns a mask for field of register . This + * value can be ~'d and then &'d to clear the value of field for + * register . + * + * ___f(void) : Returns the constant value after being shifted + * to place it at field of register . This value can be |'d + * with others to produce a full register value for . + * + * __v(u32 r) : Returns the value of field from a full register + * value 'r' after being shifted to place its LSB at bit 0. + * This value is suitable for direct comparison with other unshifted + * values appropriate for use in field of register . + * + * ___v(void) : Returns the constant value for defined for + * field of register . This value is suitable for direct + * comparison with unshifted values appropriate for use in field + * of register . 
+ */ +#ifndef _hw_timer_gv11b_h_ +#define _hw_timer_gv11b_h_ + +static inline u32 timer_pri_timeout_r(void) +{ + return 0x00009080U; +} +static inline u32 timer_pri_timeout_period_f(u32 v) +{ + return (v & 0xffffffU) << 0U; +} +static inline u32 timer_pri_timeout_period_m(void) +{ + return 0xffffffU << 0U; +} +static inline u32 timer_pri_timeout_period_v(u32 r) +{ + return (r >> 0U) & 0xffffffU; +} +static inline u32 timer_pri_timeout_en_f(u32 v) +{ + return (v & 0x1U) << 31U; +} +static inline u32 timer_pri_timeout_en_m(void) +{ + return 0x1U << 31U; +} +static inline u32 timer_pri_timeout_en_v(u32 r) +{ + return (r >> 31U) & 0x1U; +} +static inline u32 timer_pri_timeout_en_en_enabled_f(void) +{ + return 0x80000000U; +} +static inline u32 timer_pri_timeout_en_en_disabled_f(void) +{ + return 0x0U; +} +static inline u32 timer_pri_timeout_save_0_r(void) +{ + return 0x00009084U; +} +static inline u32 timer_pri_timeout_save_1_r(void) +{ + return 0x00009088U; +} +static inline u32 timer_pri_timeout_fecs_errcode_r(void) +{ + return 0x0000908cU; +} +static inline u32 timer_time_0_r(void) +{ + return 0x00009400U; +} +static inline u32 timer_time_1_r(void) +{ + return 0x00009410U; +} +#endif diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_top_gv11b.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_top_gv11b.h new file mode 100644 index 000000000..89e4aebbd --- /dev/null +++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_top_gv11b.h @@ -0,0 +1,235 @@ +/* + * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/* + * Function naming determines intended use: + * + * _r(void) : Returns the offset for register . + * + * _o(void) : Returns the offset for element . + * + * _w(void) : Returns the word offset for word (4 byte) element . + * + * __s(void) : Returns size of field of register in bits. + * + * __f(u32 v) : Returns a value based on 'v' which has been shifted + * and masked to place it at field of register . This value + * can be |'d with others to produce a full register value for + * register . + * + * __m(void) : Returns a mask for field of register . This + * value can be ~'d and then &'d to clear the value of field for + * register . + * + * ___f(void) : Returns the constant value after being shifted + * to place it at field of register . This value can be |'d + * with others to produce a full register value for . 
+ * + * __v(u32 r) : Returns the value of field from a full register + * value 'r' after being shifted to place its LSB at bit 0. + * This value is suitable for direct comparison with other unshifted + * values appropriate for use in field of register . + * + * ___v(void) : Returns the constant value for defined for + * field of register . This value is suitable for direct + * comparison with unshifted values appropriate for use in field + * of register . + */ +#ifndef _hw_top_gv11b_h_ +#define _hw_top_gv11b_h_ + +static inline u32 top_num_gpcs_r(void) +{ + return 0x00022430U; +} +static inline u32 top_num_gpcs_value_v(u32 r) +{ + return (r >> 0U) & 0x1fU; +} +static inline u32 top_tpc_per_gpc_r(void) +{ + return 0x00022434U; +} +static inline u32 top_tpc_per_gpc_value_v(u32 r) +{ + return (r >> 0U) & 0x1fU; +} +static inline u32 top_num_fbps_r(void) +{ + return 0x00022438U; +} +static inline u32 top_num_fbps_value_v(u32 r) +{ + return (r >> 0U) & 0x1fU; +} +static inline u32 top_ltc_per_fbp_r(void) +{ + return 0x00022450U; +} +static inline u32 top_ltc_per_fbp_value_v(u32 r) +{ + return (r >> 0U) & 0x1fU; +} +static inline u32 top_slices_per_ltc_r(void) +{ + return 0x0002245cU; +} +static inline u32 top_slices_per_ltc_value_v(u32 r) +{ + return (r >> 0U) & 0x1fU; +} +static inline u32 top_num_ltcs_r(void) +{ + return 0x00022454U; +} +static inline u32 top_num_ces_r(void) +{ + return 0x00022444U; +} +static inline u32 top_num_ces_value_v(u32 r) +{ + return (r >> 0U) & 0x1fU; +} +static inline u32 top_device_info_r(u32 i) +{ + return 0x00022700U + i*4U; +} +static inline u32 top_device_info__size_1_v(void) +{ + return 0x00000040U; +} +static inline u32 top_device_info_chain_v(u32 r) +{ + return (r >> 31U) & 0x1U; +} +static inline u32 top_device_info_chain_enable_v(void) +{ + return 0x00000001U; +} +static inline u32 top_device_info_engine_enum_v(u32 r) +{ + return (r >> 26U) & 0xfU; +} +static inline u32 top_device_info_runlist_enum_v(u32 r) +{ + return (r >> 21U) 
& 0xfU; +} +static inline u32 top_device_info_intr_enum_v(u32 r) +{ + return (r >> 15U) & 0x1fU; +} +static inline u32 top_device_info_reset_enum_v(u32 r) +{ + return (r >> 9U) & 0x1fU; +} +static inline u32 top_device_info_type_enum_v(u32 r) +{ + return (r >> 2U) & 0x1fffffffU; +} +static inline u32 top_device_info_type_enum_graphics_v(void) +{ + return 0x00000000U; +} +static inline u32 top_device_info_type_enum_graphics_f(void) +{ + return 0x0U; +} +static inline u32 top_device_info_type_enum_copy2_v(void) +{ + return 0x00000003U; +} +static inline u32 top_device_info_type_enum_copy2_f(void) +{ + return 0xcU; +} +static inline u32 top_device_info_type_enum_lce_v(void) +{ + return 0x00000013U; +} +static inline u32 top_device_info_type_enum_lce_f(void) +{ + return 0x4cU; +} +static inline u32 top_device_info_engine_v(u32 r) +{ + return (r >> 5U) & 0x1U; +} +static inline u32 top_device_info_runlist_v(u32 r) +{ + return (r >> 4U) & 0x1U; +} +static inline u32 top_device_info_intr_v(u32 r) +{ + return (r >> 3U) & 0x1U; +} +static inline u32 top_device_info_reset_v(u32 r) +{ + return (r >> 2U) & 0x1U; +} +static inline u32 top_device_info_entry_v(u32 r) +{ + return (r >> 0U) & 0x3U; +} +static inline u32 top_device_info_entry_not_valid_v(void) +{ + return 0x00000000U; +} +static inline u32 top_device_info_entry_enum_v(void) +{ + return 0x00000002U; +} +static inline u32 top_device_info_entry_data_v(void) +{ + return 0x00000001U; +} +static inline u32 top_device_info_data_type_v(u32 r) +{ + return (r >> 30U) & 0x1U; +} +static inline u32 top_device_info_data_type_enum2_v(void) +{ + return 0x00000000U; +} +static inline u32 top_device_info_data_inst_id_v(u32 r) +{ + return (r >> 26U) & 0xfU; +} +static inline u32 top_device_info_data_pri_base_v(u32 r) +{ + return (r >> 12U) & 0xfffU; +} +static inline u32 top_device_info_data_pri_base_align_v(void) +{ + return 0x0000000cU; +} +static inline u32 top_device_info_data_fault_id_enum_v(u32 r) +{ + return (r >> 3U) & 0x7fU; 
+} +static inline u32 top_device_info_data_fault_id_v(u32 r) +{ + return (r >> 2U) & 0x1U; +} +static inline u32 top_device_info_data_fault_id_valid_v(void) +{ + return 0x00000001U; +} +#endif diff --git a/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_usermode_gv11b.h b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_usermode_gv11b.h new file mode 100644 index 000000000..e3749690b --- /dev/null +++ b/drivers/gpu/nvgpu/include/nvgpu/hw/gv11b/hw_usermode_gv11b.h @@ -0,0 +1,95 @@ +/* + * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +/* + * Function naming determines intended use: + * + * _r(void) : Returns the offset for register . + * + * _o(void) : Returns the offset for element . + * + * _w(void) : Returns the word offset for word (4 byte) element . + * + * __s(void) : Returns size of field of register in bits. 
+ * + * __f(u32 v) : Returns a value based on 'v' which has been shifted + * and masked to place it at field of register . This value + * can be |'d with others to produce a full register value for + * register . + * + * __m(void) : Returns a mask for field of register . This + * value can be ~'d and then &'d to clear the value of field for + * register . + * + * ___f(void) : Returns the constant value after being shifted + * to place it at field of register . This value can be |'d + * with others to produce a full register value for . + * + * __v(u32 r) : Returns the value of field from a full register + * value 'r' after being shifted to place its LSB at bit 0. + * This value is suitable for direct comparison with other unshifted + * values appropriate for use in field of register . + * + * ___v(void) : Returns the constant value for defined for + * field of register . This value is suitable for direct + * comparison with unshifted values appropriate for use in field + * of register . 
+ */ +#ifndef _hw_usermode_gv11b_h_ +#define _hw_usermode_gv11b_h_ + +static inline u32 usermode_cfg0_r(void) +{ + return 0x00810000U; +} +static inline u32 usermode_cfg0_usermode_class_id_f(u32 v) +{ + return (v & 0xffffU) << 0U; +} +static inline u32 usermode_cfg0_usermode_class_id_value_v(void) +{ + return 0x0000c361U; +} +static inline u32 usermode_time_0_r(void) +{ + return 0x00810080U; +} +static inline u32 usermode_time_0_nsec_f(u32 v) +{ + return (v & 0x7ffffffU) << 5U; +} +static inline u32 usermode_time_1_r(void) +{ + return 0x00810084U; +} +static inline u32 usermode_time_1_nsec_f(u32 v) +{ + return (v & 0x1fffffffU) << 0U; +} +static inline u32 usermode_notify_channel_pending_r(void) +{ + return 0x00810090U; +} +static inline u32 usermode_notify_channel_pending_id_f(u32 v) +{ + return (v & 0xffffffffU) << 0U; +} +#endif diff --git a/drivers/gpu/nvgpu/include/nvgpu/io_t19x.h b/drivers/gpu/nvgpu/include/nvgpu/io_t19x.h new file mode 100644 index 000000000..f8c7dbbdd --- /dev/null +++ b/drivers/gpu/nvgpu/include/nvgpu/io_t19x.h @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ +#ifndef __NVGPU_IO_T19X_H__ +#define __NVGPU_IO_T19X_H__ + +#ifdef __KERNEL__ +#include "linux/io_t19x.h" +#endif + +#endif diff --git a/drivers/gpu/nvgpu/include/nvgpu/linux/io_t19x.h b/drivers/gpu/nvgpu/include/nvgpu/linux/io_t19x.h new file mode 100644 index 000000000..f71a6ecf7 --- /dev/null +++ b/drivers/gpu/nvgpu/include/nvgpu/linux/io_t19x.h @@ -0,0 +1,26 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#ifndef __NVGPU_IO_T19X_LINUX_H__ +#define __NVGPU_IO_T19X_LINUX_H__ + +#include + +struct gk20a; + +void gv11b_usermode_writel(struct gk20a *g, u32 r, u32 v); + +#endif diff --git a/drivers/gpu/nvgpu/include/nvgpu/linux/module_t19x.h b/drivers/gpu/nvgpu/include/nvgpu/linux/module_t19x.h new file mode 100644 index 000000000..a105c6dc3 --- /dev/null +++ b/drivers/gpu/nvgpu/include/nvgpu/linux/module_t19x.h @@ -0,0 +1,27 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#ifndef __NVGPU_MODULE_T19X_H__ +#define __NVGPU_MODULE_T19X_H__ + +struct gk20a; + +void t19x_init_support(struct gk20a *g); +void t19x_remove_support(struct gk20a *g); +void t19x_lockout_registers(struct gk20a *g); +void t19x_restore_registers(struct gk20a *g); + +#endif diff --git a/drivers/gpu/nvgpu/include/nvgpu/linux/os_linux_t19x.h b/drivers/gpu/nvgpu/include/nvgpu/linux/os_linux_t19x.h new file mode 100644 index 000000000..a306bfb83 --- /dev/null +++ b/drivers/gpu/nvgpu/include/nvgpu/linux/os_linux_t19x.h @@ -0,0 +1,26 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ +#ifndef NVGPU_OS_LINUX_T19X_H +#define NVGPU_OS_LINUX_T19X_H + +#include + +struct nvgpu_os_linux_t19x { + void __iomem *usermode_regs; + void __iomem *usermode_regs_saved; +}; + +#endif diff --git a/drivers/gpu/nvgpu/include/nvgpu/linux/pci_t19x.h b/drivers/gpu/nvgpu/include/nvgpu/linux/pci_t19x.h new file mode 100644 index 000000000..c94176cc7 --- /dev/null +++ b/drivers/gpu/nvgpu/include/nvgpu/linux/pci_t19x.h @@ -0,0 +1,23 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#ifndef __NVGPU_PCI_T19X_H__ +#define __NVGPU_PCI_T19X_H__ + +struct nvgpu_os_linux; + +void t19x_nvgpu_pci_init_support(struct nvgpu_os_linux *l); + +#endif diff --git a/drivers/gpu/nvgpu/include/nvgpu/nvhost_t19x.h b/drivers/gpu/nvgpu/include/nvgpu/nvhost_t19x.h new file mode 100644 index 000000000..4b4998829 --- /dev/null +++ b/drivers/gpu/nvgpu/include/nvgpu/nvhost_t19x.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVGPU_NVHOST_T19X_H__ +#define __NVGPU_NVHOST_T19X_H__ + +#ifdef CONFIG_TEGRA_GK20A_NVHOST +#include + +struct nvgpu_nvhost_dev; + +int nvgpu_nvhost_syncpt_unit_interface_get_aperture( + struct nvgpu_nvhost_dev *nvhost_dev, + u64 *base, size_t *size); +u32 nvgpu_nvhost_syncpt_unit_interface_get_byte_offset(u32 syncpt_id); + +#endif +#endif /* __NVGPU_NVHOST_T19X_H__ */ diff --git a/drivers/gpu/nvgpu/nvgpu_gpuid_t19x.h b/drivers/gpu/nvgpu/nvgpu_gpuid_t19x.h new file mode 100644 index 000000000..8689a535d --- /dev/null +++ b/drivers/gpu/nvgpu/nvgpu_gpuid_t19x.h @@ -0,0 +1,47 @@ +/* + * NVIDIA GPU ID functions, definitions. + * + * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#ifndef _NVGPU_GPUID_T19X_H_ +#define _NVGPU_GPUID_T19X_H_ + +#define NVGPU_GPUID_GV11B 0x0000015B +#define NVGPU_GPUID_GV100 0x00000140 + +#define NVGPU_COMPAT_TEGRA_GV11B "nvidia,gv11b" +#define NVGPU_COMPAT_GENERIC_GV11B "nvidia,generic-gv11b" + + +#define TEGRA_19x_GPUID NVGPU_GPUID_GV11B +#define TEGRA_19x_GPUID_HAL gv11b_init_hal +#define TEGRA_19x_GPU_COMPAT_TEGRA NVGPU_COMPAT_TEGRA_GV11B +#define TEGRA_19x_GPU_COMPAT_GENERIC NVGPU_COMPAT_GENERIC_GV11B + +#define BIGGPU_19x_GPUID NVGPU_GPUID_GV100 +#define BIGGPU_19x_GPUID_HAL gv100_init_hal + +struct gpu_ops; struct gk20a; +extern int gv11b_init_hal(struct gk20a *g); +extern int gv100_init_hal(struct gk20a *g); +extern struct gk20a_platform t19x_gpu_tegra_platform; + +#endif diff --git a/drivers/gpu/nvgpu/tsg_t19x.h b/drivers/gpu/nvgpu/tsg_t19x.h new file mode 100644 index 000000000..d1f47cc31 --- /dev/null +++ b/drivers/gpu/nvgpu/tsg_t19x.h @@ -0,0 +1,36 @@ +/* + * NVIDIA T19x TSG + * + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVGPU_TSG_T19X_H__ +#define __NVGPU_TSG_T19X_H__ + +#include + +struct tsg_t19x { + u32 num_active_tpcs; + u8 tpc_pg_enabled; + bool tpc_num_initialized; +}; + +#endif diff --git a/drivers/gpu/nvgpu/vgpu/gv11b/platform_gv11b_vgpu_tegra.c b/drivers/gpu/nvgpu/vgpu/gv11b/platform_gv11b_vgpu_tegra.c new file mode 100644 index 000000000..fea473a71 --- /dev/null +++ b/drivers/gpu/nvgpu/vgpu/gv11b/platform_gv11b_vgpu_tegra.c @@ -0,0 +1,105 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include "gk20a/gk20a.h" +#include "vgpu/clk_vgpu.h" +#include "common/linux/platform_gk20a.h" +#include "common/linux/os_linux.h" + +#include +#include + +#include + +static int gv11b_vgpu_probe(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct gk20a_platform *platform = dev_get_drvdata(dev); + struct resource *r; + void __iomem *regs; + struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(platform->g); + struct gk20a *g = platform->g; + int ret; + + r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "usermode"); + if (!r) { + dev_err(dev, "failed to get usermode regs\n"); + return -ENXIO; + } + regs = devm_ioremap_resource(dev, r); + if (IS_ERR(regs)) { + dev_err(dev, "failed to map usermode regs\n"); + return PTR_ERR(regs); + } + l->t19x.usermode_regs = regs; + +#ifdef CONFIG_TEGRA_GK20A_NVHOST + ret = nvgpu_get_nvhost_dev(g); + if (ret) { + l->t19x.usermode_regs = NULL; + return ret; + } + + ret = nvgpu_nvhost_syncpt_unit_interface_get_aperture(g->nvhost_dev, + &g->syncpt_unit_base, + &g->syncpt_unit_size); + if (ret) { + dev_err(dev, "Failed to get syncpt interface"); + return -ENOSYS; + } + g->syncpt_size = nvgpu_nvhost_syncpt_unit_interface_get_byte_offset(1); + nvgpu_info(g, "syncpt_unit_base %llx syncpt_unit_size %zx size %x\n", + g->syncpt_unit_base, g->syncpt_unit_size, g->syncpt_size); +#endif + vgpu_init_clk_support(platform->g); + + return 0; +} + +struct gk20a_platform gv11b_vgpu_tegra_platform = { + .has_syncpoints = true, + .aggressive_sync_destroy_thresh = 64, + + /* power management configuration */ + .can_railgate_init = false, + .can_elpg_init = false, + .enable_slcg = false, + .enable_blcg = false, + .enable_elcg = false, + .enable_elpg = false, + .enable_aelpg = false, + .can_slcg = false, + .can_blcg = false, + .can_elcg = false, + + .ch_wdt_timeout_ms = 5000, + + .probe = gv11b_vgpu_probe, + + .clk_round_rate = vgpu_clk_round_rate, + .get_clk_freqs = vgpu_clk_get_freqs, + + /* frequency 
scaling configuration */ + .devfreq_governor = "userspace", + + .virtual_dev = true, +}; diff --git a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_fifo_gv11b.c b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_fifo_gv11b.c new file mode 100644 index 000000000..ae9d52a7a --- /dev/null +++ b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_fifo_gv11b.c @@ -0,0 +1,117 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include + +#include "vgpu/vgpu.h" +#include "gv11b/fifo_gv11b.h" +#include + +#include + +#ifdef CONFIG_TEGRA_GK20A_NVHOST +int vgpu_gv11b_fifo_alloc_syncpt_buf(struct channel_gk20a *c, + u32 syncpt_id, struct nvgpu_mem *syncpt_buf) +{ + int err; + struct gk20a *g = c->g; + struct vm_gk20a *vm = c->vm; + struct tegra_vgpu_cmd_msg msg = {}; + struct tegra_vgpu_map_syncpt_params *p = &msg.params.t19x.map_syncpt; + + /* + * Add ro map for complete sync point shim range in vm. 
+ * All channels sharing same vm will share same ro mapping. + * Create rw map for current channel sync point. + */ + if (!vm->syncpt_ro_map_gpu_va) { + vm->syncpt_ro_map_gpu_va = __nvgpu_vm_alloc_va(vm, + g->syncpt_unit_size, + gmmu_page_size_kernel); + if (!vm->syncpt_ro_map_gpu_va) { + nvgpu_err(g, "allocating read-only va space failed"); + return -ENOMEM; + } + + msg.cmd = TEGRA_VGPU_CMD_MAP_SYNCPT; + msg.handle = vgpu_get_handle(g); + p->as_handle = c->vm->handle; + p->gpu_va = vm->syncpt_ro_map_gpu_va; + p->len = g->syncpt_unit_size; + p->offset = 0; + p->prot = TEGRA_VGPU_MAP_PROT_READ_ONLY; + err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg)); + err = err ? err : msg.ret; + if (err) { + nvgpu_err(g, + "mapping read-only va space failed err %d", + err); + __nvgpu_vm_free_va(c->vm, vm->syncpt_ro_map_gpu_va, + gmmu_page_size_kernel); + vm->syncpt_ro_map_gpu_va = 0; + return err; + } + } + + syncpt_buf->gpu_va = __nvgpu_vm_alloc_va(c->vm, g->syncpt_size, + gmmu_page_size_kernel); + if (!syncpt_buf->gpu_va) { + nvgpu_err(g, "allocating syncpt va space failed"); + return -ENOMEM; + } + + msg.cmd = TEGRA_VGPU_CMD_MAP_SYNCPT; + msg.handle = vgpu_get_handle(g); + p->as_handle = c->vm->handle; + p->gpu_va = syncpt_buf->gpu_va; + p->len = g->syncpt_size; + p->offset = + nvgpu_nvhost_syncpt_unit_interface_get_byte_offset(syncpt_id); + p->prot = TEGRA_VGPU_MAP_PROT_NONE; + err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg)); + err = err ? 
err : msg.ret; + if (err) { + nvgpu_err(g, "mapping syncpt va space failed err %d", err); + __nvgpu_vm_free_va(c->vm, syncpt_buf->gpu_va, + gmmu_page_size_kernel); + return err; + } + + return 0; +} +#endif /* CONFIG_TEGRA_GK20A_NVHOST */ + +int vgpu_gv11b_init_fifo_setup_hw(struct gk20a *g) +{ + struct fifo_gk20a *f = &g->fifo; + int err; + + err = vgpu_get_attribute(vgpu_get_handle(g), + TEGRA_VGPU_ATTRIB_MAX_SUBCTX_COUNT, + &f->t19x.max_subctx_count); + if (err) { + nvgpu_err(g, "get max_subctx_count failed %d", err); + return err; + } + + return 0; +} diff --git a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_fifo_gv11b.h b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_fifo_gv11b.h new file mode 100644 index 000000000..bea935d32 --- /dev/null +++ b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_fifo_gv11b.h @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _VGPU_FIFO_GV11B_H_ +#define _VGPU_FIFO_GV11B_H_ + +struct gk20a; + +int vgpu_gv11b_init_fifo_setup_hw(struct gk20a *g); +int vgpu_gv11b_fifo_alloc_syncpt_buf(struct channel_gk20a *c, + u32 syncpt_id, struct nvgpu_mem *syncpt_buf); +#endif diff --git a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_gr_gv11b.c b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_gr_gv11b.c new file mode 100644 index 000000000..899522218 --- /dev/null +++ b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_gr_gv11b.c @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include +#include + +#include "vgpu_subctx_gv11b.h" + +int vgpu_gr_gv11b_commit_inst(struct channel_gk20a *c, u64 gpu_va) +{ + int err; + + err = vgpu_gv11b_alloc_subctx_header(c); + if (err) + return err; + + err = vgpu_gr_commit_inst(c, gpu_va); + if (err) + vgpu_gv11b_free_subctx_header(c); + + return err; +} diff --git a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_gr_gv11b.h b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_gr_gv11b.h new file mode 100644 index 000000000..562198ca3 --- /dev/null +++ b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_gr_gv11b.h @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef _VGPU_GR_GV11B_H_ +#define _VGPU_GR_GV11B_H_ + +struct channel_gk20a; + +int vgpu_gr_gv11b_commit_inst(struct channel_gk20a *c, u64 gpu_va); + +#endif diff --git a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_gv11b.c b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_gv11b.c new file mode 100644 index 000000000..feac195ec --- /dev/null +++ b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_gv11b.c @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#include "gk20a/gk20a.h" + +#include +#include + +#include "vgpu/vgpu.h" +#include "vgpu_gv11b.h" + +int vgpu_gv11b_init_gpu_characteristics(struct gk20a *g) +{ + int err; + + gk20a_dbg_fn(""); + + err = vgpu_init_gpu_characteristics(g); + if (err) { + nvgpu_err(g, "vgpu_init_gpu_characteristics failed, err %d\n", err); + return err; + } + + __nvgpu_set_enabled(g, NVGPU_SUPPORT_TSG_SUBCONTEXTS, true); + + return 0; +} diff --git a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_gv11b.h b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_gv11b.h new file mode 100644 index 000000000..9413904bd --- /dev/null +++ b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_gv11b.h @@ -0,0 +1,21 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef _VGPU_GV11B_H_ +#define _VGPU_GV11B_H_ + +struct gk20a; + +int vgpu_gv11b_init_gpu_characteristics(struct gk20a *g); + +#endif diff --git a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_hal_gv11b.c b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_hal_gv11b.c new file mode 100644 index 000000000..17d6f0493 --- /dev/null +++ b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_hal_gv11b.c @@ -0,0 +1,642 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +#include "vgpu_gv11b.h" +#include "vgpu_gr_gv11b.h" +#include "vgpu_fifo_gv11b.h" +#include "vgpu_subctx_gv11b.h" +#include "vgpu_tsg_gv11b.h" + +#include +#include +#include +#include +#include + +static const struct gpu_ops vgpu_gv11b_ops = { + .ltc = { + .determine_L2_size_bytes = vgpu_determine_L2_size_bytes, + .set_zbc_s_entry = gv11b_ltc_set_zbc_stencil_entry, + .set_zbc_color_entry = gm20b_ltc_set_zbc_color_entry, + .set_zbc_depth_entry = gm20b_ltc_set_zbc_depth_entry, + .init_cbc = NULL, + .init_fs_state = vgpu_ltc_init_fs_state, + .init_comptags = vgpu_ltc_init_comptags, + .cbc_ctrl = NULL, + .isr = gv11b_ltc_isr, + .cbc_fix_config = gv11b_ltc_cbc_fix_config, + .flush = gm20b_flush_ltc, + .set_enabled = gp10b_ltc_set_enabled, + }, + .ce2 = { + .isr_stall = gv11b_ce_isr, + .isr_nonstall = gp10b_ce_nonstall_isr, + .get_num_pce = vgpu_ce_get_num_pce, + }, + .gr = { + .init_gpc_mmu = gr_gv11b_init_gpc_mmu, + .bundle_cb_defaults = gr_gv11b_bundle_cb_defaults, + .cb_size_default = gr_gv11b_cb_size_default, + .calc_global_ctx_buffer_size = + gr_gv11b_calc_global_ctx_buffer_size, + .commit_global_attrib_cb = gr_gv11b_commit_global_attrib_cb, + .commit_global_bundle_cb = gr_gp10b_commit_global_bundle_cb, + .commit_global_cb_manager = gr_gp10b_commit_global_cb_manager, + .commit_global_pagepool = gr_gp10b_commit_global_pagepool, + .handle_sw_method = gr_gv11b_handle_sw_method, + .set_alpha_circular_buffer_size = + 
gr_gv11b_set_alpha_circular_buffer_size, + .set_circular_buffer_size = gr_gv11b_set_circular_buffer_size, + .enable_hww_exceptions = gr_gv11b_enable_hww_exceptions, + .is_valid_class = gr_gv11b_is_valid_class, + .is_valid_gfx_class = gr_gv11b_is_valid_gfx_class, + .is_valid_compute_class = gr_gv11b_is_valid_compute_class, + .get_sm_dsm_perf_regs = gv11b_gr_get_sm_dsm_perf_regs, + .get_sm_dsm_perf_ctrl_regs = gv11b_gr_get_sm_dsm_perf_ctrl_regs, + .init_fs_state = vgpu_gm20b_init_fs_state, + .set_hww_esr_report_mask = gv11b_gr_set_hww_esr_report_mask, + .falcon_load_ucode = gr_gm20b_load_ctxsw_ucode_segments, + .load_ctxsw_ucode = gr_gk20a_load_ctxsw_ucode, + .set_gpc_tpc_mask = gr_gv11b_set_gpc_tpc_mask, + .get_gpc_tpc_mask = vgpu_gr_get_gpc_tpc_mask, + .free_channel_ctx = vgpu_gr_free_channel_ctx, + .alloc_obj_ctx = vgpu_gr_alloc_obj_ctx, + .bind_ctxsw_zcull = vgpu_gr_bind_ctxsw_zcull, + .get_zcull_info = vgpu_gr_get_zcull_info, + .is_tpc_addr = gr_gm20b_is_tpc_addr, + .get_tpc_num = gr_gm20b_get_tpc_num, + .detect_sm_arch = vgpu_gr_detect_sm_arch, + .add_zbc_color = gr_gp10b_add_zbc_color, + .add_zbc_depth = gr_gp10b_add_zbc_depth, + .zbc_set_table = vgpu_gr_add_zbc, + .zbc_query_table = vgpu_gr_query_zbc, + .pmu_save_zbc = gk20a_pmu_save_zbc, + .add_zbc = gr_gk20a_add_zbc, + .pagepool_default_size = gr_gv11b_pagepool_default_size, + .init_ctx_state = vgpu_gr_gp10b_init_ctx_state, + .alloc_gr_ctx = vgpu_gr_gp10b_alloc_gr_ctx, + .free_gr_ctx = vgpu_gr_gp10b_free_gr_ctx, + .update_ctxsw_preemption_mode = + gr_gp10b_update_ctxsw_preemption_mode, + .dump_gr_regs = NULL, + .update_pc_sampling = gr_gm20b_update_pc_sampling, + .get_fbp_en_mask = vgpu_gr_get_fbp_en_mask, + .get_max_ltc_per_fbp = vgpu_gr_get_max_ltc_per_fbp, + .get_max_lts_per_ltc = vgpu_gr_get_max_lts_per_ltc, + .get_rop_l2_en_mask = vgpu_gr_rop_l2_en_mask, + .get_max_fbps_count = vgpu_gr_get_max_fbps_count, + .init_sm_dsm_reg_info = gv11b_gr_init_sm_dsm_reg_info, + .wait_empty = gr_gv11b_wait_empty, + 
.init_cyclestats = vgpu_gr_gm20b_init_cyclestats, + .set_sm_debug_mode = vgpu_gr_set_sm_debug_mode, + .enable_cde_in_fecs = gr_gm20b_enable_cde_in_fecs, + .bpt_reg_info = gv11b_gr_bpt_reg_info, + .get_access_map = gr_gv11b_get_access_map, + .handle_fecs_error = gr_gv11b_handle_fecs_error, + .handle_sm_exception = gr_gk20a_handle_sm_exception, + .handle_tex_exception = gr_gv11b_handle_tex_exception, + .enable_gpc_exceptions = gr_gv11b_enable_gpc_exceptions, + .enable_exceptions = gr_gv11b_enable_exceptions, + .get_lrf_tex_ltc_dram_override = get_ecc_override_val, + .update_smpc_ctxsw_mode = vgpu_gr_update_smpc_ctxsw_mode, + .update_hwpm_ctxsw_mode = vgpu_gr_update_hwpm_ctxsw_mode, + .record_sm_error_state = gv11b_gr_record_sm_error_state, + .update_sm_error_state = gv11b_gr_update_sm_error_state, + .clear_sm_error_state = vgpu_gr_clear_sm_error_state, + .suspend_contexts = vgpu_gr_suspend_contexts, + .resume_contexts = vgpu_gr_resume_contexts, + .get_preemption_mode_flags = gr_gp10b_get_preemption_mode_flags, + .init_sm_id_table = gr_gv100_init_sm_id_table, + .load_smid_config = gr_gv11b_load_smid_config, + .program_sm_id_numbering = gr_gv11b_program_sm_id_numbering, + .is_ltcs_ltss_addr = gr_gm20b_is_ltcs_ltss_addr, + .is_ltcn_ltss_addr = gr_gm20b_is_ltcn_ltss_addr, + .split_lts_broadcast_addr = gr_gm20b_split_lts_broadcast_addr, + .split_ltc_broadcast_addr = gr_gm20b_split_ltc_broadcast_addr, + .setup_rop_mapping = gr_gv11b_setup_rop_mapping, + .program_zcull_mapping = gr_gv11b_program_zcull_mapping, + .commit_global_timeslice = gr_gv11b_commit_global_timeslice, + .commit_inst = vgpu_gr_gv11b_commit_inst, + .write_zcull_ptr = gr_gv11b_write_zcull_ptr, + .write_pm_ptr = gr_gv11b_write_pm_ptr, + .init_elcg_mode = gr_gv11b_init_elcg_mode, + .load_tpc_mask = gr_gv11b_load_tpc_mask, + .inval_icache = gr_gk20a_inval_icache, + .trigger_suspend = gv11b_gr_sm_trigger_suspend, + .wait_for_pause = gr_gk20a_wait_for_pause, + .resume_from_pause = gv11b_gr_resume_from_pause, + 
.clear_sm_errors = gr_gk20a_clear_sm_errors, + .tpc_enabled_exceptions = gr_gk20a_tpc_enabled_exceptions, + .get_esr_sm_sel = gv11b_gr_get_esr_sm_sel, + .sm_debugger_attached = gv11b_gr_sm_debugger_attached, + .suspend_single_sm = gv11b_gr_suspend_single_sm, + .suspend_all_sms = gv11b_gr_suspend_all_sms, + .resume_single_sm = gv11b_gr_resume_single_sm, + .resume_all_sms = gv11b_gr_resume_all_sms, + .get_sm_hww_warp_esr = gv11b_gr_get_sm_hww_warp_esr, + .get_sm_hww_global_esr = gv11b_gr_get_sm_hww_global_esr, + .get_sm_no_lock_down_hww_global_esr_mask = + gv11b_gr_get_sm_no_lock_down_hww_global_esr_mask, + .lock_down_sm = gv11b_gr_lock_down_sm, + .wait_for_sm_lock_down = gv11b_gr_wait_for_sm_lock_down, + .clear_sm_hww = gv11b_gr_clear_sm_hww, + .init_ovr_sm_dsm_perf = gv11b_gr_init_ovr_sm_dsm_perf, + .get_ovr_perf_regs = gv11b_gr_get_ovr_perf_regs, + .disable_rd_coalesce = gm20a_gr_disable_rd_coalesce, + .set_boosted_ctx = NULL, + .set_preemption_mode = vgpu_gr_gp10b_set_preemption_mode, + .set_czf_bypass = NULL, + .pre_process_sm_exception = gr_gv11b_pre_process_sm_exception, + .set_preemption_buffer_va = gr_gv11b_set_preemption_buffer_va, + .init_preemption_state = NULL, + .update_boosted_ctx = NULL, + .set_bes_crop_debug3 = gr_gp10b_set_bes_crop_debug3, + .create_gr_sysfs = gr_gv11b_create_sysfs, + .set_ctxsw_preemption_mode = vgpu_gr_gp10b_set_ctxsw_preemption_mode, + .is_etpc_addr = gv11b_gr_pri_is_etpc_addr, + .egpc_etpc_priv_addr_table = gv11b_gr_egpc_etpc_priv_addr_table, + .handle_tpc_mpc_exception = gr_gv11b_handle_tpc_mpc_exception, + .zbc_s_query_table = gr_gv11b_zbc_s_query_table, + .load_zbc_s_default_tbl = gr_gv11b_load_stencil_default_tbl, + .handle_gpc_gpcmmu_exception = + gr_gv11b_handle_gpc_gpcmmu_exception, + .add_zbc_type_s = gr_gv11b_add_zbc_type_s, + .get_egpc_base = gv11b_gr_get_egpc_base, + .get_egpc_etpc_num = gv11b_gr_get_egpc_etpc_num, + .handle_gpc_gpccs_exception = + gr_gv11b_handle_gpc_gpccs_exception, + .load_zbc_s_tbl = 
gr_gv11b_load_stencil_tbl, + .access_smpc_reg = gv11b_gr_access_smpc_reg, + .is_egpc_addr = gv11b_gr_pri_is_egpc_addr, + .add_zbc_s = gr_gv11b_add_zbc_stencil, + .handle_gcc_exception = gr_gv11b_handle_gcc_exception, + .init_sw_veid_bundle = gr_gv11b_init_sw_veid_bundle, + .handle_tpc_sm_ecc_exception = + gr_gv11b_handle_tpc_sm_ecc_exception, + .decode_egpc_addr = gv11b_gr_decode_egpc_addr, + .init_ctxsw_hdr_data = gr_gp10b_init_ctxsw_hdr_data, + }, + .fb = { + .reset = gv11b_fb_reset, + .init_hw = gk20a_fb_init_hw, + .init_fs_state = gv11b_fb_init_fs_state, + .init_cbc = gv11b_fb_init_cbc, + .set_mmu_page_size = gm20b_fb_set_mmu_page_size, + .set_use_full_comp_tag_line = + gm20b_fb_set_use_full_comp_tag_line, + .compression_page_size = gp10b_fb_compression_page_size, + .compressible_page_size = gp10b_fb_compressible_page_size, + .vpr_info_fetch = gm20b_fb_vpr_info_fetch, + .dump_vpr_wpr_info = gm20b_fb_dump_vpr_wpr_info, + .read_wpr_info = gm20b_fb_read_wpr_info, + .is_debug_mode_enabled = NULL, + .set_debug_mode = vgpu_mm_mmu_set_debug_mode, + .tlb_invalidate = vgpu_mm_tlb_invalidate, + .hub_isr = gv11b_fb_hub_isr, + }, + .clock_gating = { + .slcg_bus_load_gating_prod = + gv11b_slcg_bus_load_gating_prod, + .slcg_ce2_load_gating_prod = + gv11b_slcg_ce2_load_gating_prod, + .slcg_chiplet_load_gating_prod = + gv11b_slcg_chiplet_load_gating_prod, + .slcg_ctxsw_firmware_load_gating_prod = + gv11b_slcg_ctxsw_firmware_load_gating_prod, + .slcg_fb_load_gating_prod = + gv11b_slcg_fb_load_gating_prod, + .slcg_fifo_load_gating_prod = + gv11b_slcg_fifo_load_gating_prod, + .slcg_gr_load_gating_prod = + gr_gv11b_slcg_gr_load_gating_prod, + .slcg_ltc_load_gating_prod = + ltc_gv11b_slcg_ltc_load_gating_prod, + .slcg_perf_load_gating_prod = + gv11b_slcg_perf_load_gating_prod, + .slcg_priring_load_gating_prod = + gv11b_slcg_priring_load_gating_prod, + .slcg_pmu_load_gating_prod = + gv11b_slcg_pmu_load_gating_prod, + .slcg_therm_load_gating_prod = + 
gv11b_slcg_therm_load_gating_prod, + .slcg_xbar_load_gating_prod = + gv11b_slcg_xbar_load_gating_prod, + .blcg_bus_load_gating_prod = + gv11b_blcg_bus_load_gating_prod, + .blcg_ce_load_gating_prod = + gv11b_blcg_ce_load_gating_prod, + .blcg_ctxsw_firmware_load_gating_prod = + gv11b_blcg_ctxsw_firmware_load_gating_prod, + .blcg_fb_load_gating_prod = + gv11b_blcg_fb_load_gating_prod, + .blcg_fifo_load_gating_prod = + gv11b_blcg_fifo_load_gating_prod, + .blcg_gr_load_gating_prod = + gv11b_blcg_gr_load_gating_prod, + .blcg_ltc_load_gating_prod = + gv11b_blcg_ltc_load_gating_prod, + .blcg_pwr_csb_load_gating_prod = + gv11b_blcg_pwr_csb_load_gating_prod, + .blcg_pmu_load_gating_prod = + gv11b_blcg_pmu_load_gating_prod, + .blcg_xbar_load_gating_prod = + gv11b_blcg_xbar_load_gating_prod, + .pg_gr_load_gating_prod = + gr_gv11b_pg_gr_load_gating_prod, + }, + .fifo = { + .init_fifo_setup_hw = vgpu_gv11b_init_fifo_setup_hw, + .bind_channel = vgpu_channel_bind, + .unbind_channel = vgpu_channel_unbind, + .disable_channel = vgpu_channel_disable, + .enable_channel = vgpu_channel_enable, + .alloc_inst = vgpu_channel_alloc_inst, + .free_inst = vgpu_channel_free_inst, + .setup_ramfc = vgpu_channel_setup_ramfc, + .channel_set_timeslice = vgpu_channel_set_timeslice, + .default_timeslice_us = vgpu_fifo_default_timeslice_us, + .setup_userd = gk20a_fifo_setup_userd, + .userd_gp_get = gv11b_userd_gp_get, + .userd_gp_put = gv11b_userd_gp_put, + .userd_pb_get = gv11b_userd_pb_get, + .pbdma_acquire_val = gk20a_fifo_pbdma_acquire_val, + .preempt_channel = vgpu_fifo_preempt_channel, + .preempt_tsg = vgpu_fifo_preempt_tsg, + .enable_tsg = vgpu_enable_tsg, + .disable_tsg = gk20a_disable_tsg, + .tsg_verify_channel_status = NULL, + .tsg_verify_status_ctx_reload = NULL, + /* TODO: implement it for CE fault */ + .tsg_verify_status_faulted = NULL, + .update_runlist = vgpu_fifo_update_runlist, + .trigger_mmu_fault = NULL, + .get_mmu_fault_info = NULL, + .wait_engine_idle = vgpu_fifo_wait_engine_idle, + 
.get_num_fifos = gv11b_fifo_get_num_fifos, + .get_pbdma_signature = gp10b_fifo_get_pbdma_signature, + .set_runlist_interleave = vgpu_fifo_set_runlist_interleave, + .tsg_set_timeslice = vgpu_tsg_set_timeslice, + .tsg_open = vgpu_tsg_open, + .force_reset_ch = vgpu_fifo_force_reset_ch, + .engine_enum_from_type = gp10b_fifo_engine_enum_from_type, + .device_info_data_parse = gp10b_device_info_data_parse, + .eng_runlist_base_size = fifo_eng_runlist_base__size_1_v, + .init_engine_info = vgpu_fifo_init_engine_info, + .runlist_entry_size = ram_rl_entry_size_v, + .get_tsg_runlist_entry = gv11b_get_tsg_runlist_entry, + .get_ch_runlist_entry = gv11b_get_ch_runlist_entry, + .is_fault_engine_subid_gpc = gv11b_is_fault_engine_subid_gpc, + .dump_pbdma_status = gk20a_dump_pbdma_status, + .dump_eng_status = gv11b_dump_eng_status, + .dump_channel_status_ramfc = gv11b_dump_channel_status_ramfc, + .intr_0_error_mask = gv11b_fifo_intr_0_error_mask, + .is_preempt_pending = gv11b_fifo_is_preempt_pending, + .init_pbdma_intr_descs = gv11b_fifo_init_pbdma_intr_descs, + .reset_enable_hw = gv11b_init_fifo_reset_enable_hw, + .teardown_ch_tsg = gv11b_fifo_teardown_ch_tsg, + .handle_sched_error = gv11b_fifo_handle_sched_error, + .handle_pbdma_intr_0 = gv11b_fifo_handle_pbdma_intr_0, + .handle_pbdma_intr_1 = gv11b_fifo_handle_pbdma_intr_1, + .init_eng_method_buffers = gv11b_fifo_init_eng_method_buffers, + .deinit_eng_method_buffers = + gv11b_fifo_deinit_eng_method_buffers, + .tsg_bind_channel = vgpu_gv11b_tsg_bind_channel, + .tsg_unbind_channel = vgpu_tsg_unbind_channel, +#ifdef CONFIG_TEGRA_GK20A_NVHOST + .alloc_syncpt_buf = vgpu_gv11b_fifo_alloc_syncpt_buf, + .free_syncpt_buf = gv11b_fifo_free_syncpt_buf, + .add_syncpt_wait_cmd = gv11b_fifo_add_syncpt_wait_cmd, + .get_syncpt_wait_cmd_size = gv11b_fifo_get_syncpt_wait_cmd_size, + .add_syncpt_incr_cmd = gv11b_fifo_add_syncpt_incr_cmd, + .get_syncpt_incr_cmd_size = gv11b_fifo_get_syncpt_incr_cmd_size, +#endif + .resetup_ramfc = NULL, + 
.reschedule_runlist = NULL, + .device_info_fault_id = top_device_info_data_fault_id_enum_v, + .free_channel_ctx_header = vgpu_gv11b_free_subctx_header, + .preempt_ch_tsg = gv11b_fifo_preempt_ch_tsg, + .handle_ctxsw_timeout = gv11b_fifo_handle_ctxsw_timeout, + }, + .gr_ctx = { + .get_netlist_name = gr_gv11b_get_netlist_name, + .is_fw_defined = gr_gv11b_is_firmware_defined, + }, +#ifdef CONFIG_GK20A_CTXSW_TRACE + .fecs_trace = { + .alloc_user_buffer = NULL, + .free_user_buffer = NULL, + .mmap_user_buffer = NULL, + .init = NULL, + .deinit = NULL, + .enable = NULL, + .disable = NULL, + .is_enabled = NULL, + .reset = NULL, + .flush = NULL, + .poll = NULL, + .bind_channel = NULL, + .unbind_channel = NULL, + .max_entries = NULL, + }, +#endif /* CONFIG_GK20A_CTXSW_TRACE */ + .mm = { + /* FIXME: add support for sparse mappings */ + .support_sparse = NULL, + .gmmu_map = vgpu_gp10b_locked_gmmu_map, + .gmmu_unmap = vgpu_locked_gmmu_unmap, + .vm_bind_channel = vgpu_vm_bind_channel, + .fb_flush = vgpu_mm_fb_flush, + .l2_invalidate = vgpu_mm_l2_invalidate, + .l2_flush = vgpu_mm_l2_flush, + .cbc_clean = gk20a_mm_cbc_clean, + .set_big_page_size = gm20b_mm_set_big_page_size, + .get_big_page_sizes = gm20b_mm_get_big_page_sizes, + .get_default_big_page_size = gp10b_mm_get_default_big_page_size, + .gpu_phys_addr = gm20b_gpu_phys_addr, + .get_iommu_bit = gk20a_mm_get_iommu_bit, + .get_mmu_levels = gp10b_mm_get_mmu_levels, + .init_pdb = gp10b_mm_init_pdb, + .init_mm_setup_hw = vgpu_gp10b_init_mm_setup_hw, + .is_bar1_supported = gv11b_mm_is_bar1_supported, + .init_inst_block = gv11b_init_inst_block, + .mmu_fault_pending = gv11b_mm_mmu_fault_pending, + .get_kind_invalid = gm20b_get_kind_invalid, + .get_kind_pitch = gm20b_get_kind_pitch, + .init_bar2_vm = gb10b_init_bar2_vm, + .init_bar2_mm_hw_setup = gv11b_init_bar2_mm_hw_setup, + .remove_bar2_vm = gv11b_mm_remove_bar2_vm, + .fault_info_mem_destroy = gv11b_mm_fault_info_mem_destroy, + }, + .therm = { + .init_therm_setup_hw = 
gp10b_init_therm_setup_hw, + .elcg_init_idle_filters = gv11b_elcg_init_idle_filters, + }, + .pmu = { + .pmu_setup_elpg = gp10b_pmu_setup_elpg, + .pmu_get_queue_head = pwr_pmu_queue_head_r, + .pmu_get_queue_head_size = pwr_pmu_queue_head__size_1_v, + .pmu_get_queue_tail = pwr_pmu_queue_tail_r, + .pmu_get_queue_tail_size = pwr_pmu_queue_tail__size_1_v, + .pmu_queue_head = gk20a_pmu_queue_head, + .pmu_queue_tail = gk20a_pmu_queue_tail, + .pmu_msgq_tail = gk20a_pmu_msgq_tail, + .pmu_mutex_size = pwr_pmu_mutex__size_1_v, + .pmu_mutex_acquire = gk20a_pmu_mutex_acquire, + .pmu_mutex_release = gk20a_pmu_mutex_release, + .write_dmatrfbase = gp10b_write_dmatrfbase, + .pmu_elpg_statistics = gp106_pmu_elpg_statistics, + .pmu_pg_init_param = gv11b_pg_gr_init, + .pmu_pg_supported_engines_list = gk20a_pmu_pg_engines_list, + .pmu_pg_engines_feature_list = gk20a_pmu_pg_feature_list, + .dump_secure_fuses = pmu_dump_security_fuses_gp10b, + .reset_engine = gp106_pmu_engine_reset, + .is_engine_in_reset = gp106_pmu_is_engine_in_reset, + .pmu_nsbootstrap = gv11b_pmu_bootstrap, + .pmu_pg_set_sub_feature_mask = gv11b_pg_set_subfeature_mask, + .is_pmu_supported = gv11b_is_pmu_supported, + }, + .regops = { + .get_global_whitelist_ranges = + gv11b_get_global_whitelist_ranges, + .get_global_whitelist_ranges_count = + gv11b_get_global_whitelist_ranges_count, + .get_context_whitelist_ranges = + gv11b_get_context_whitelist_ranges, + .get_context_whitelist_ranges_count = + gv11b_get_context_whitelist_ranges_count, + .get_runcontrol_whitelist = gv11b_get_runcontrol_whitelist, + .get_runcontrol_whitelist_count = + gv11b_get_runcontrol_whitelist_count, + .get_runcontrol_whitelist_ranges = + gv11b_get_runcontrol_whitelist_ranges, + .get_runcontrol_whitelist_ranges_count = + gv11b_get_runcontrol_whitelist_ranges_count, + .get_qctl_whitelist = gv11b_get_qctl_whitelist, + .get_qctl_whitelist_count = gv11b_get_qctl_whitelist_count, + .get_qctl_whitelist_ranges = gv11b_get_qctl_whitelist_ranges, + 
.get_qctl_whitelist_ranges_count = + gv11b_get_qctl_whitelist_ranges_count, + .apply_smpc_war = gv11b_apply_smpc_war, + }, + .mc = { + .intr_enable = mc_gv11b_intr_enable, + .intr_unit_config = mc_gp10b_intr_unit_config, + .isr_stall = mc_gp10b_isr_stall, + .intr_stall = mc_gp10b_intr_stall, + .intr_stall_pause = mc_gp10b_intr_stall_pause, + .intr_stall_resume = mc_gp10b_intr_stall_resume, + .intr_nonstall = mc_gp10b_intr_nonstall, + .intr_nonstall_pause = mc_gp10b_intr_nonstall_pause, + .intr_nonstall_resume = mc_gp10b_intr_nonstall_resume, + .enable = gk20a_mc_enable, + .disable = gk20a_mc_disable, + .reset = gk20a_mc_reset, + .boot_0 = gk20a_mc_boot_0, + .is_intr1_pending = mc_gp10b_is_intr1_pending, + .is_intr_hub_pending = gv11b_mc_is_intr_hub_pending, + }, + .debug = { + .show_dump = NULL, + }, + .dbg_session_ops = { + .exec_reg_ops = vgpu_exec_regops, + .dbg_set_powergate = vgpu_dbg_set_powergate, + .check_and_set_global_reservation = + vgpu_check_and_set_global_reservation, + .check_and_set_context_reservation = + vgpu_check_and_set_context_reservation, + .release_profiler_reservation = + vgpu_release_profiler_reservation, + .perfbuffer_enable = vgpu_perfbuffer_enable, + .perfbuffer_disable = vgpu_perfbuffer_disable, + }, + .bus = { + .init_hw = gk20a_bus_init_hw, + .isr = gk20a_bus_isr, + .read_ptimer = vgpu_read_ptimer, + .get_timestamps_zipper = vgpu_get_timestamps_zipper, + .bar1_bind = NULL, + }, +#if defined(CONFIG_GK20A_CYCLE_STATS) + .css = { + .enable_snapshot = vgpu_css_enable_snapshot_buffer, + .disable_snapshot = vgpu_css_release_snapshot_buffer, + .check_data_available = vgpu_css_flush_snapshots, + .set_handled_snapshots = NULL, + .allocate_perfmon_ids = NULL, + .release_perfmon_ids = NULL, + }, +#endif + .falcon = { + .falcon_hal_sw_init = gk20a_falcon_hal_sw_init, + }, + .priv_ring = { + .isr = gp10b_priv_ring_isr, + }, + .chip_init_gpu_characteristics = vgpu_gv11b_init_gpu_characteristics, + .get_litter_value = gv11b_get_litter_value, +}; + 
+int vgpu_gv11b_init_hal(struct gk20a *g) +{ + struct gpu_ops *gops = &g->ops; + u32 val; + bool priv_security; + + gops->ltc = vgpu_gv11b_ops.ltc; + gops->ce2 = vgpu_gv11b_ops.ce2; + gops->gr = vgpu_gv11b_ops.gr; + gops->fb = vgpu_gv11b_ops.fb; + gops->clock_gating = vgpu_gv11b_ops.clock_gating; + gops->fifo = vgpu_gv11b_ops.fifo; + gops->gr_ctx = vgpu_gv11b_ops.gr_ctx; + gops->mm = vgpu_gv11b_ops.mm; + gops->fecs_trace = vgpu_gv11b_ops.fecs_trace; + gops->therm = vgpu_gv11b_ops.therm; + gops->pmu = vgpu_gv11b_ops.pmu; + gops->regops = vgpu_gv11b_ops.regops; + gops->mc = vgpu_gv11b_ops.mc; + gops->debug = vgpu_gv11b_ops.debug; + gops->dbg_session_ops = vgpu_gv11b_ops.dbg_session_ops; + gops->bus = vgpu_gv11b_ops.bus; +#if defined(CONFIG_GK20A_CYCLE_STATS) + gops->css = vgpu_gv11b_ops.css; +#endif + gops->falcon = vgpu_gv11b_ops.falcon; + gops->priv_ring = vgpu_gv11b_ops.priv_ring; + + /* Lone functions */ + gops->chip_init_gpu_characteristics = + vgpu_gv11b_ops.chip_init_gpu_characteristics; + gops->get_litter_value = vgpu_gv11b_ops.get_litter_value; + + val = gk20a_readl(g, fuse_opt_priv_sec_en_r()); + if (val) { + priv_security = true; + pr_err("priv security is enabled\n"); + } else { + priv_security = false; + pr_err("priv security is disabled\n"); + } + __nvgpu_set_enabled(g, NVGPU_GR_USE_DMA_FOR_FW_BOOTSTRAP, false); + __nvgpu_set_enabled(g, NVGPU_SEC_PRIVSECURITY, priv_security); + __nvgpu_set_enabled(g, NVGPU_SEC_SECUREGPCCS, priv_security); + + /* priv security dependent ops */ + if (nvgpu_is_enabled(g, NVGPU_SEC_PRIVSECURITY)) { + /* Add in ops from gm20b acr */ + gops->pmu.prepare_ucode = gp106_prepare_ucode_blob, + gops->pmu.pmu_setup_hw_and_bootstrap = gv11b_bootstrap_hs_flcn, + gops->pmu.get_wpr = gm20b_wpr_info, + gops->pmu.alloc_blob_space = gm20b_alloc_blob_space, + gops->pmu.pmu_populate_loader_cfg = + gp106_pmu_populate_loader_cfg, + gops->pmu.flcn_populate_bl_dmem_desc = + gp106_flcn_populate_bl_dmem_desc, + gops->pmu.falcon_wait_for_halt = 
pmu_wait_for_halt, + gops->pmu.falcon_clear_halt_interrupt_status = + clear_halt_interrupt_status, + gops->pmu.init_falcon_setup_hw = gv11b_init_pmu_setup_hw1, + + gops->pmu.init_wpr_region = gm20b_pmu_init_acr; + gops->pmu.load_lsfalcon_ucode = gp10b_load_falcon_ucode; + gops->pmu.is_lazy_bootstrap = gv11b_is_lazy_bootstrap, + gops->pmu.is_priv_load = gv11b_is_priv_load, + + gops->gr.load_ctxsw_ucode = gr_gm20b_load_ctxsw_ucode; + } else { + /* Inherit from gk20a */ + gops->pmu.prepare_ucode = nvgpu_pmu_prepare_ns_ucode_blob, + gops->pmu.pmu_setup_hw_and_bootstrap = gk20a_init_pmu_setup_hw1, + + gops->pmu.load_lsfalcon_ucode = NULL; + gops->pmu.init_wpr_region = NULL; + gops->pmu.pmu_setup_hw_and_bootstrap = gp10b_init_pmu_setup_hw1; + + gops->gr.load_ctxsw_ucode = gr_gk20a_load_ctxsw_ucode; + } + + __nvgpu_set_enabled(g, NVGPU_PMU_FECS_BOOTSTRAP_DONE, false); + g->bootstrap_owner = LSF_BOOTSTRAP_OWNER_DEFAULT; + + g->name = "gv11b"; + + return 0; +} diff --git a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_subctx_gv11b.c b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_subctx_gv11b.c new file mode 100644 index 000000000..857e58c43 --- /dev/null +++ b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_subctx_gv11b.c @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include +#include +#include + +int vgpu_gv11b_alloc_subctx_header(struct channel_gk20a *c) +{ + struct ctx_header_desc *ctx = &c->ch_ctx.ctx_header; + struct tegra_vgpu_cmd_msg msg = {}; + struct tegra_vgpu_alloc_ctx_header_params *p = + &msg.params.t19x.alloc_ctx_header; + struct gr_gk20a *gr = &c->g->gr; + int err; + + msg.cmd = TEGRA_VGPU_CMD_ALLOC_CTX_HEADER; + msg.handle = vgpu_get_handle(c->g); + p->ch_handle = c->virt_ctx; + p->ctx_header_va = __nvgpu_vm_alloc_va(c->vm, + gr->ctx_vars.golden_image_size, + gmmu_page_size_kernel); + if (!p->ctx_header_va) { + nvgpu_err(c->g, "alloc va failed for ctx_header"); + return -ENOMEM; + } + err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg)); + err = err ? err : msg.ret; + if (unlikely(err)) { + nvgpu_err(c->g, "alloc ctx_header failed err %d", err); + __nvgpu_vm_free_va(c->vm, p->ctx_header_va, + gmmu_page_size_kernel); + return err; + } + ctx->mem.gpu_va = p->ctx_header_va; + + return err; +} + +void vgpu_gv11b_free_subctx_header(struct channel_gk20a *c) +{ + struct ctx_header_desc *ctx = &c->ch_ctx.ctx_header; + struct tegra_vgpu_cmd_msg msg = {}; + struct tegra_vgpu_free_ctx_header_params *p = + &msg.params.t19x.free_ctx_header; + int err; + + if (ctx->mem.gpu_va) { + msg.cmd = TEGRA_VGPU_CMD_FREE_CTX_HEADER; + msg.handle = vgpu_get_handle(c->g); + p->ch_handle = c->virt_ctx; + err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg)); + err = err ? 
err : msg.ret; + if (unlikely(err)) + nvgpu_err(c->g, "free ctx_header failed err %d", err); + __nvgpu_vm_free_va(c->vm, ctx->mem.gpu_va, + gmmu_page_size_kernel); + ctx->mem.gpu_va = 0; + } +} diff --git a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_subctx_gv11b.h b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_subctx_gv11b.h new file mode 100644 index 000000000..0e09f4f69 --- /dev/null +++ b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_subctx_gv11b.h @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */
+
+#ifndef _VGPU_SUBCTX_GV11B_H_
+#define _VGPU_SUBCTX_GV11B_H_
+
+struct channel_gk20a;
+
+int vgpu_gv11b_alloc_subctx_header(struct channel_gk20a *c);
+void vgpu_gv11b_free_subctx_header(struct channel_gk20a *c);
+
+#endif
diff --git a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_tsg_gv11b.c b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_tsg_gv11b.c
new file mode 100644
index 000000000..7e70272ad
--- /dev/null
+++ b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_tsg_gv11b.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include	/* FIXME(extraction): angle-bracket header name lost — restore from upstream nvgpu */
+#include	/* FIXME(extraction): angle-bracket header name lost — restore from upstream nvgpu */
+#include	/* FIXME(extraction): angle-bracket header name lost — restore from upstream nvgpu */
+
+#include "vgpu_tsg_gv11b.h"
+
+/* Bind a channel to a TSG, then mirror the binding (with gv11b subctx id
+ * and runqueue selector) to the vgpu server.  If the server-side RPC
+ * fails, the local bind is rolled back via gk20a_tsg_unbind_channel(). */
+int vgpu_gv11b_tsg_bind_channel(struct tsg_gk20a *tsg,
+		struct channel_gk20a *ch)
+{
+	struct tegra_vgpu_cmd_msg msg = {};
+	struct tegra_vgpu_tsg_bind_channel_ex_params *p =
+				&msg.params.t19x.tsg_bind_channel_ex;
+	int err;
+
+	gk20a_dbg_fn("");
+
+	/* local (client-side) bind first */
+	err = gk20a_tsg_bind_channel(tsg, ch);
+	if (err)
+		return err;
+
+	msg.cmd = TEGRA_VGPU_CMD_TSG_BIND_CHANNEL_EX;
+	msg.handle = vgpu_get_handle(tsg->g);
+	p->tsg_id = tsg->tsgid;
+	p->ch_handle = ch->virt_ctx;
+	p->subctx_id = ch->t19x.subctx_id;
+	p->runqueue_sel = ch->t19x.runqueue_sel;
+	err = vgpu_comm_sendrecv(&msg, sizeof(msg), sizeof(msg));
+	err = err ? err : msg.ret;
+	if (err) {
+		nvgpu_err(tsg->g,
+			"vgpu_gv11b_tsg_bind_channel failed, ch %d tsgid %d",
+			ch->chid, tsg->tsgid);
+		/* undo the local bind so client and server stay in sync */
+		gk20a_tsg_unbind_channel(ch);
+	}
+
+	return err;
+}
diff --git a/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_tsg_gv11b.h b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_tsg_gv11b.h
new file mode 100644
index 000000000..c7bb2f4e2
--- /dev/null
+++ b/drivers/gpu/nvgpu/vgpu/gv11b/vgpu_tsg_gv11b.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _VGPU_TSG_GV11B_H_
+#define _VGPU_TSG_GV11B_H_
+
+int vgpu_gv11b_tsg_bind_channel(struct tsg_gk20a *tsg,
+		struct channel_gk20a *ch);
+
+#endif
diff --git a/drivers/gpu/nvgpu/vgpu/vgpu_t19x.h b/drivers/gpu/nvgpu/vgpu/vgpu_t19x.h
new file mode 100644
index 000000000..8c020f800
--- /dev/null
+++ b/drivers/gpu/nvgpu/vgpu/vgpu_t19x.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _VGPU_T19X_H_
+#define _VGPU_T19X_H_
+
+struct gk20a;
+
+int vgpu_gv11b_init_hal(struct gk20a *g);
+
+/* t19x-generic aliases resolve to the gv11b vgpu implementation */
+#define vgpu_t19x_init_hal(g) vgpu_gv11b_init_hal(g)
+
+#define TEGRA_19x_VGPU_COMPAT_TEGRA "nvidia,gv11b-vgpu"
+extern struct gk20a_platform gv11b_vgpu_tegra_platform;
+#define t19x_vgpu_tegra_platform gv11b_vgpu_tegra_platform
+
+#endif
diff --git a/include/linux/tegra_gpu_t19x.h b/include/linux/tegra_gpu_t19x.h
new file mode 100644
index 000000000..f6157c127
--- /dev/null
+++ b/include/linux/tegra_gpu_t19x.h
@@ -0,0 +1,24 @@
+/*
+ * Tegra GPU Virtualization Interfaces to Server
+ *
+ * Copyright (c) 2016, NVIDIA Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __TEGRA_GPU_T19X_H
+#define __TEGRA_GPU_T19X_H
+
+#define GPU_LIT_NUM_SUBCTX 99
+
+#endif
diff --git a/include/linux/tegra_vgpu_t19x.h b/include/linux/tegra_vgpu_t19x.h
new file mode 100644
index 000000000..38dbbf605
--- /dev/null
+++ b/include/linux/tegra_vgpu_t19x.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __TEGRA_VGPU_T19X_H
+#define __TEGRA_VGPU_T19X_H
+
+/* t19x vgpu RPC command ids (extend the base tegra_vgpu command space) */
+#define TEGRA_VGPU_CMD_ALLOC_CTX_HEADER		100
+#define TEGRA_VGPU_CMD_FREE_CTX_HEADER		101
+#define TEGRA_VGPU_CMD_MAP_SYNCPT		102
+#define TEGRA_VGPU_CMD_TSG_BIND_CHANNEL_EX	103
+
+struct tegra_vgpu_alloc_ctx_header_params {
+	u64 ch_handle;
+	u64 ctx_header_va;
+};
+
+struct tegra_vgpu_free_ctx_header_params {
+	u64 ch_handle;
+};
+
+struct tegra_vgpu_map_syncpt_params {
+	u64 as_handle;
+	u64 gpu_va;
+	u64 len;
+	u64 offset;
+	u8 prot;
+};
+
+struct tegra_vgpu_tsg_bind_channel_ex_params {
+	u32 tsg_id;
+	u64 ch_handle;
+	u32 subctx_id;
+	u32 runqueue_sel;
+};
+
+union tegra_vgpu_t19x_params {
+	struct tegra_vgpu_alloc_ctx_header_params alloc_ctx_header;
+	struct tegra_vgpu_free_ctx_header_params free_ctx_header;
+	struct tegra_vgpu_map_syncpt_params map_syncpt;
+	struct tegra_vgpu_tsg_bind_channel_ex_params tsg_bind_channel_ex;
+};
+
+#define TEGRA_VGPU_ATTRIB_MAX_SUBCTX_COUNT	100
+
+#endif
diff --git a/include/uapi/linux/nvgpu-t19x.h b/include/uapi/linux/nvgpu-t19x.h
new file mode 100644
index 000000000..27db97c02
--- /dev/null
+++ b/include/uapi/linux/nvgpu-t19x.h
@@ -0,0 +1,59 @@
+/*
+ * NVGPU Public Interface Header
+ *
+ * Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+/* This file is meant to extend nvgpu.h, not replace it
+ * as such, be sure that nvgpu.h is actually the file performing the
+ * inclusion, to the extent that's possible.
+ */
+#ifndef _UAPI__LINUX_NVGPU_IOCTL_H
+# error "This file is to be included within nvgpu.h only."
+#endif
+
+#ifndef _UAPI__LINUX_NVGPU_T19X_IOCTL_H_
+#define _UAPI__LINUX_NVGPU_T19X_IOCTL_H_
+
+#define NVGPU_GPU_ARCH_GV110 0x00000150
+#define NVGPU_GPU_ARCH_GV100 0x00000140
+#define NVGPU_GPU_IMPL_GV11B 0x0000000B
+#define NVGPU_GPU_IMPL_GV100 0x00000000
+
+/*
+ * this flag is used in struct nvgpu_as_map_buffer_ex_args
+ * to provide L3 cache allocation hint
+ */
+#define NVGPU_AS_MAP_BUFFER_FLAGS_L3_ALLOC (1 << 7)
+
+/* subcontexts are available */
+#define NVGPU_GPU_FLAGS_SUPPORT_TSG_SUBCONTEXTS (1ULL << 22)
+
+struct nvgpu_tsg_bind_channel_ex_args {
+	/* in: channel fd */
+	__s32 channel_fd;
+
+	/* in: VEID in Volta */
+	__u32 subcontext_id;
+	__u32 num_active_tpcs;
+	__u8 tpc_pg_enabled;
+	__u8 reserved[11];
+};
+
+#define NVGPU_TSG_IOCTL_BIND_CHANNEL_EX \
+	_IOWR(NVGPU_TSG_IOCTL_MAGIC, 11, struct nvgpu_tsg_bind_channel_ex_args)
+
+#define NVGPU_TSG_IOCTL_MAX NVGPU_TSG_IOCTL_BIND_CHANNEL_EX
+
+#define NVGPU_TSG_IOCTL_MAX_ARG sizeof(struct nvgpu_tsg_bind_channel_ex_args)
+
+#endif /* _UAPI__LINUX_NVGPU_T19X_IOCTL_H_ */