Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git (synced 2025-12-23 01:50:07 +03:00)
Revert "gpu: nvgpu: Get coherency on gv100 + NVLINK working"
Also revert other changes related to IO coherence. This may be the
culprit in a recent dev-kernel lockdown.

Bug 2070609

Change-Id: Ida178aef161fadbc6db9512521ea51c702c1564b
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1665914
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Srikar Srimath Tirumala <srikars@nvidia.com>
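For orientation, most of the hunks below are mechanical fall-out of dropping the coherent-sysmem aperture: before the revert, register-target call sites passed three masks (non-coherent sysmem, coherent sysmem, vidmem) and nvgpu_aperture_mask() selected the coherent one when NVGPU_USE_COHERENT_SYSMEM was set; after the revert the helpers take two masks again. A minimal, self-contained C sketch of the two shapes (the names mirror the diff; the scaffolding is hypothetical and not code from this commit):

/* Sketch only -- not part of the change-set. */
enum nvgpu_aperture { APERTURE_INVALID, APERTURE_SYSMEM, APERTURE_VIDMEM };

/* Pre-revert shape: a dedicated mask for IO-coherent sysmem. */
static unsigned int aperture_mask_pre(enum nvgpu_aperture ap, int coherent_sysmem,
                                      unsigned int sysmem_mask,
                                      unsigned int sysmem_coh_mask,
                                      unsigned int vidmem_mask)
{
        if (ap == APERTURE_SYSMEM)
                return coherent_sysmem ? sysmem_coh_mask : sysmem_mask;
        if (ap == APERTURE_VIDMEM)
                return vidmem_mask;
        return 0;
}

/* Post-revert shape: only non-coherent sysmem and vidmem targets remain. */
static unsigned int aperture_mask_post(enum nvgpu_aperture ap,
                                       unsigned int sysmem_mask,
                                       unsigned int vidmem_mask)
{
        if (ap == APERTURE_SYSMEM)
                return sysmem_mask;
        if (ap == APERTURE_VIDMEM)
                return vidmem_mask;
        return 0;
}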
committed by Srikar Srimath Tirumala
parent 3fdd8e38b2
commit 5a35a95654
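The first hunks remove the bug 2040115 workaround from the sysmem DMA allocator: the reverted code asked the DMA API for no kernel mapping and built a cacheable vmap() of the pages itself, tearing it down with vunmap() on free. A kernel-context sketch of that pattern, assuming the page array comes from the driver's allocation bookkeeping (as mem->priv.pages does in nvgpu); this is an illustration of the removed approach, not code from the commit:

#include <linux/vmalloc.h>	/* vmap(), vunmap() */
#include <linux/mm.h>		/* struct page, PAGE_KERNEL */

/* Map an IO-coherent sysmem buffer with a normal cacheable kernel mapping. */
static void *coherent_sysmem_map(struct page **pages, unsigned int npages)
{
	/* PAGE_KERNEL (cacheable) is only safe because the device is IO coherent. */
	return vmap(pages, npages, 0, PAGE_KERNEL);
}

static void coherent_sysmem_unmap(void *cpu_va)
{
	vunmap(cpu_va);
}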
@@ -221,16 +221,6 @@ int nvgpu_dma_alloc_flags_sys(struct gk20a *g, unsigned long flags,
NVGPU_DEFINE_DMA_ATTRS(dma_attrs);
void *alloc_ret;

/*
* WAR for IO coherent chips: the DMA API does not seem to generate
* mappings that work correctly. Unclear why - Bug ID: 2040115.
*
* Basically we just tell the DMA API not to map with NO_KERNEL_MAPPING
* and then make a vmap() ourselves.
*/
if (nvgpu_is_enabled(g, NVGPU_USE_COHERENT_SYSMEM))
flags |= NVGPU_DMA_NO_KERNEL_MAPPING;

/*
* Before the debug print so we see this in the total. But during
* cleanup in the fail path this has to be subtracted.
@@ -265,17 +255,7 @@ int nvgpu_dma_alloc_flags_sys(struct gk20a *g, unsigned long flags,
iova, size, flags);
}
if (err)
goto fail_free_dma;

if (nvgpu_is_enabled(g, NVGPU_USE_COHERENT_SYSMEM)) {
mem->cpu_va = vmap(mem->priv.pages,
size >> PAGE_SHIFT,
0, PAGE_KERNEL);
if (!mem->cpu_va) {
err = -ENOMEM;
goto fail_free_sgt;
}
}
goto fail_free;

mem->aligned_size = size;
mem->aperture = APERTURE_SYSMEM;
@@ -285,14 +265,12 @@ int nvgpu_dma_alloc_flags_sys(struct gk20a *g, unsigned long flags,

return 0;

fail_free_sgt:
nvgpu_free_sgtable(g, &mem->priv.sgt);
fail_free_dma:
fail_free:
g->dma_memory_used -= mem->aligned_size;
dma_free_attrs(d, size, alloc_ret, iova, NVGPU_DMA_ATTR(dma_attrs));
mem->cpu_va = NULL;
mem->priv.sgt = NULL;
mem->size = 0;
g->dma_memory_used -= mem->aligned_size;
return err;
}

@@ -488,12 +466,6 @@ static void nvgpu_dma_free_sys(struct gk20a *g, struct nvgpu_mem *mem)
if (!(mem->mem_flags & NVGPU_MEM_FLAG_SHADOW_COPY) &&
!(mem->mem_flags & __NVGPU_MEM_FLAG_NO_DMA) &&
(mem->cpu_va || mem->priv.pages)) {
/*
* Free side of WAR for bug 2040115.
*/
if (nvgpu_is_enabled(g, NVGPU_USE_COHERENT_SYSMEM))
vunmap(mem->cpu_va);

if (mem->priv.flags) {
NVGPU_DEFINE_DMA_ATTRS(dma_attrs);

@@ -20,7 +20,6 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
@@ -1108,7 +1107,6 @@ static int gk20a_probe(struct platform_device *dev)
struct gk20a *gk20a;
int err;
struct gk20a_platform *platform = NULL;
struct device_node *np;

if (dev->dev.of_node) {
const struct of_device_id *match;
@@ -1149,12 +1147,6 @@ static int gk20a_probe(struct platform_device *dev)
if (err)
goto return_err;

np = nvgpu_get_node(gk20a);
if (of_dma_is_coherent(np)) {
__nvgpu_set_enabled(gk20a, NVGPU_USE_COHERENT_SYSMEM, true);
__nvgpu_set_enabled(gk20a, NVGPU_SUPPORT_IO_COHERENCE, true);
}

if (nvgpu_platform_is_simulation(gk20a))
__nvgpu_set_enabled(gk20a, NVGPU_IS_FMODEL, true);

@@ -34,24 +34,39 @@
#include "gk20a/gk20a.h"
#include "gk20a/mm_gk20a.h"

u32 __nvgpu_aperture_mask(struct gk20a *g, enum nvgpu_aperture aperture,
u32 sysmem_mask, u32 vidmem_mask)
{
switch (aperture) {
case APERTURE_SYSMEM:
/* some igpus consider system memory vidmem */
return nvgpu_is_enabled(g, NVGPU_MM_HONORS_APERTURE)
? sysmem_mask : vidmem_mask;
case APERTURE_VIDMEM:
/* for dgpus only */
return vidmem_mask;
case APERTURE_INVALID:
WARN_ON("Bad aperture");
}
return 0;
}

u32 nvgpu_aperture_mask(struct gk20a *g, struct nvgpu_mem *mem,
u32 sysmem_mask, u32 vidmem_mask)
{
return __nvgpu_aperture_mask(g, mem->aperture,
sysmem_mask, vidmem_mask);
}

int nvgpu_mem_begin(struct gk20a *g, struct nvgpu_mem *mem)
{
void *cpu_va;
pgprot_t prot = nvgpu_is_enabled(g, NVGPU_USE_COHERENT_SYSMEM) ?
PAGE_KERNEL :
pgprot_t prot = nvgpu_is_enabled(g, NVGPU_DMA_COHERENT) ? PAGE_KERNEL :
pgprot_writecombine(PAGE_KERNEL);

if (mem->aperture != APERTURE_SYSMEM || g->mm.force_pramin)
return 0;

/*
* WAR for bug 2040115: we already will always have a coherent vmap()
* for all sysmem buffers. The prot settings are left alone since
* eventually this should be deleted.
*/
if (nvgpu_is_enabled(g, NVGPU_USE_COHERENT_SYSMEM))
return 0;

/*
* A CPU mapping is implicitly made for all SYSMEM DMA allocations that
* don't have NVGPU_DMA_NO_KERNEL_MAPPING. Thus we don't need to make
@@ -81,13 +96,6 @@ void nvgpu_mem_end(struct gk20a *g, struct nvgpu_mem *mem)
if (mem->aperture != APERTURE_SYSMEM || g->mm.force_pramin)
return;

/*
* WAR for bug 2040115: skip this since the map will be taken care of
* during the free in the DMA API.
*/
if (nvgpu_is_enabled(g, NVGPU_USE_COHERENT_SYSMEM))
return;

/*
* Similar to nvgpu_mem_begin() we don't need to unmap the CPU mapping
* already made by the DMA API.
@@ -307,8 +315,7 @@ void nvgpu_memset(struct gk20a *g, struct nvgpu_mem *mem, u32 offset,
*/
u64 nvgpu_mem_get_addr_sgl(struct gk20a *g, struct scatterlist *sgl)
{
if (nvgpu_is_enabled(g, NVGPU_MM_USE_PHYSICAL_SG) ||
!nvgpu_iommuable(g))
if (!nvgpu_iommuable(g))
return g->ops.mm.gpu_phys_addr(g, NULL, sg_phys(sgl));

if (sg_dma_address(sgl) == 0)
@@ -408,12 +415,8 @@ int nvgpu_mem_create_from_mem(struct gk20a *g,

/*
* Re-use the CPU mapping only if the mapping was made by the DMA API.
*
* Bug 2040115: the DMA API wrapper makes the mapping that we should
* re-use.
*/
if (!(src->priv.flags & NVGPU_DMA_NO_KERNEL_MAPPING) ||
nvgpu_is_enabled(g, NVGPU_USE_COHERENT_SYSMEM))
if (!(src->priv.flags & NVGPU_DMA_NO_KERNEL_MAPPING))
dest->cpu_va = src->cpu_va + (PAGE_SIZE * start_page);

dest->priv.pages = src->priv.pages + start_page;

@@ -17,13 +17,13 @@
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>

#include <nvgpu/nvgpu_common.h>
#include <nvgpu/kmem.h>
#include <nvgpu/enabled.h>
#include <nvgpu/nvlink.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>

#include "gk20a/gk20a.h"
#include "clk/clk.h"
@@ -566,12 +566,6 @@ static int nvgpu_pci_probe(struct pci_dev *pdev,
platform->g = g;
l->dev = &pdev->dev;

np = nvgpu_get_node(g);
if (of_dma_is_coherent(np)) {
__nvgpu_set_enabled(g, NVGPU_USE_COHERENT_SYSMEM, true);
__nvgpu_set_enabled(g, NVGPU_SUPPORT_IO_COHERENCE, true);
}

err = pci_enable_device(pdev);
if (err)
return err;
@@ -650,6 +644,13 @@ static int nvgpu_pci_probe(struct pci_dev *pdev,

g->mm.has_physical_mode = false;

np = nvgpu_get_node(g);

if (of_dma_is_coherent(np)) {
__nvgpu_set_enabled(g, NVGPU_DMA_COHERENT, true);
__nvgpu_set_enabled(g, NVGPU_SUPPORT_IO_COHERENCE, true);
}

return 0;
}

@@ -166,8 +166,7 @@ struct nvgpu_mapped_buf *nvgpu_vm_find_mapping(struct vm_gk20a *vm,
vm->gmmu_page_sizes[mapped_buffer->pgsz_idx] >> 10,
vm_aspace_id(vm),
mapped_buffer->flags,
nvgpu_aperture_str(g,
gk20a_dmabuf_aperture(g, os_buf->dmabuf)));
nvgpu_aperture_str(gk20a_dmabuf_aperture(g, os_buf->dmabuf)));

return mapped_buffer;
}

@@ -79,13 +79,6 @@ static u64 __nvgpu_gmmu_map(struct vm_gk20a *vm,
if (!sgt)
return -ENOMEM;

/*
* If the GPU is IO coherent and the DMA API is giving us IO coherent
* CPU mappings then we gotta make sure we use the IO coherent aperture.
*/
if (nvgpu_is_enabled(g, NVGPU_USE_COHERENT_SYSMEM))
flags |= NVGPU_VM_MAP_IO_COHERENT;

nvgpu_mutex_acquire(&vm->update_gmmu_lock);
vaddr = g->ops.mm.gmmu_map(vm, addr,
sgt, /* sg list */
@@ -634,7 +627,7 @@ static int __nvgpu_gmmu_update_page_table(struct vm_gk20a *vm,
page_size >> 10,
nvgpu_gmmu_perm_str(attrs->rw_flag),
attrs->kind_v,
nvgpu_aperture_str(g, attrs->aperture),
nvgpu_aperture_str(attrs->aperture),
attrs->cacheable ? 'C' : '-',
attrs->sparse ? 'S' : '-',
attrs->priv ? 'P' : '-',
@@ -711,13 +704,6 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,

attrs.l3_alloc = (bool)(flags & NVGPU_VM_MAP_L3_ALLOC);

/*
* Handle the IO coherency aperture: make sure the .aperture field is
* correct based on the IO coherency flag.
*/
if (attrs.coherent && attrs.aperture == APERTURE_SYSMEM)
attrs.aperture = __APERTURE_SYSMEM_COH;

/*
* Only allocate a new GPU VA range if we haven't already been passed a
* GPU VA range. This facilitates fixed mappings.

@@ -28,52 +28,6 @@

#include "gk20a/gk20a.h"

/*
* Make sure to use the right coherency aperture if you use this function! This
* will not add any checks. If you want to simply use the default coherency then
* use nvgpu_aperture_mask().
*/
u32 __nvgpu_aperture_mask(struct gk20a *g, enum nvgpu_aperture aperture,
u32 sysmem_mask, u32 sysmem_coh_mask, u32 vidmem_mask)
{
/*
* Some iGPUs treat sysmem (i.e SoC DRAM) as vidmem. In these cases the
* "sysmem" aperture should really be translated to VIDMEM.
*/
if (!nvgpu_is_enabled(g, NVGPU_MM_HONORS_APERTURE))
aperture = APERTURE_VIDMEM;

switch (aperture) {
case __APERTURE_SYSMEM_COH:
return sysmem_coh_mask;
case APERTURE_SYSMEM:
return sysmem_mask;
case APERTURE_VIDMEM:
return vidmem_mask;
case APERTURE_INVALID:
WARN_ON("Bad aperture");
}
return 0;
}

u32 nvgpu_aperture_mask(struct gk20a *g, struct nvgpu_mem *mem,
u32 sysmem_mask, u32 sysmem_coh_mask, u32 vidmem_mask)
{
enum nvgpu_aperture ap = mem->aperture;

/*
* Handle the coherent aperture: ideally most of the driver is not
* aware of the difference between coherent and non-coherent sysmem so
* we add this translation step here.
*/
if (nvgpu_is_enabled(g, NVGPU_USE_COHERENT_SYSMEM) &&
ap == APERTURE_SYSMEM)
ap = __APERTURE_SYSMEM_COH;

return __nvgpu_aperture_mask(g, ap,
sysmem_mask, sysmem_coh_mask, vidmem_mask);
}

void *nvgpu_sgt_get_next(struct nvgpu_sgt *sgt, void *sgl)
{
return sgt->ops->sgl_next(sgl);

@@ -21,7 +21,6 @@
*/

#include <nvgpu/page_allocator.h>
#include <nvgpu/enabled.h>
#include <nvgpu/log.h>
#include <nvgpu/soc.h>
#include <nvgpu/bus.h>
@@ -156,9 +155,8 @@ int gk20a_bus_bar1_bind(struct gk20a *g, struct nvgpu_mem *bar1_inst)

gk20a_writel(g, bus_bar1_block_r(),
nvgpu_aperture_mask(g, bar1_inst,
bus_bar1_block_target_sys_mem_ncoh_f(),
bus_bar1_block_target_sys_mem_coh_f(),
bus_bar1_block_target_vid_mem_f()) |
bus_bar1_block_target_sys_mem_ncoh_f(),
bus_bar1_block_target_vid_mem_f()) |
bus_bar1_block_mode_virtual_f() |
bus_bar1_block_ptr_f(ptr_v));


@@ -98,9 +98,8 @@ void gk20a_fb_tlb_invalidate(struct gk20a *g, struct nvgpu_mem *pdb)
gk20a_writel(g, fb_mmu_invalidate_pdb_r(),
fb_mmu_invalidate_pdb_addr_f(addr_lo) |
nvgpu_aperture_mask(g, pdb,
fb_mmu_invalidate_pdb_aperture_sys_mem_f(),
fb_mmu_invalidate_pdb_aperture_sys_mem_f(),
fb_mmu_invalidate_pdb_aperture_vid_mem_f()));
fb_mmu_invalidate_pdb_aperture_sys_mem_f(),
fb_mmu_invalidate_pdb_aperture_vid_mem_f()));

gk20a_writel(g, fb_mmu_invalidate_r(),
fb_mmu_invalidate_all_va_true_f() |

@@ -653,7 +653,6 @@ int gk20a_fecs_trace_bind_channel(struct gk20a *g,
return -ENOMEM;
aperture = nvgpu_aperture_mask(g, &trace->trace_buf,
ctxsw_prog_main_image_context_timestamp_buffer_ptr_hi_target_sys_mem_noncoherent_f(),
ctxsw_prog_main_image_context_timestamp_buffer_ptr_hi_target_sys_mem_coherent_f(),
ctxsw_prog_main_image_context_timestamp_buffer_ptr_hi_target_vid_mem_f());

if (nvgpu_mem_begin(g, mem))

@@ -28,7 +28,6 @@
#include <nvgpu/dma.h>
#include <nvgpu/timers.h>
#include <nvgpu/semaphore.h>
#include <nvgpu/enabled.h>
#include <nvgpu/kmem.h>
#include <nvgpu/log.h>
#include <nvgpu/soc.h>
@@ -667,13 +666,11 @@ static void fifo_engine_exception_status(struct gk20a *g,
static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
{
struct fifo_runlist_info_gk20a *runlist;
struct fifo_engine_info_gk20a *engine_info;
unsigned int runlist_id;
u32 i;
size_t runlist_size;
u32 active_engine_id, pbdma_id, engine_id;
int flags = nvgpu_is_enabled(g, NVGPU_MM_USE_PHYSICAL_SG) ?
NVGPU_DMA_FORCE_CONTIGUOUS : 0;
struct fifo_engine_info_gk20a *engine_info;

nvgpu_log_fn(g, " ");

@@ -708,9 +705,8 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
f->num_runlist_entries, runlist_size);

for (i = 0; i < MAX_RUNLIST_BUFFERS; i++) {
int err = nvgpu_dma_alloc_flags_sys(g, flags,
runlist_size,
&runlist->mem[i]);
int err = nvgpu_dma_alloc_sys(g, runlist_size,
&runlist->mem[i]);
if (err) {
nvgpu_err(g, "memory allocation failed");
goto clean_up_runlist;
@@ -3240,9 +3236,8 @@ static int gk20a_fifo_update_runlist_locked(struct gk20a *g, u32 runlist_id,
gk20a_writel(g, fifo_runlist_base_r(),
fifo_runlist_base_ptr_f(u64_lo32(runlist_iova >> 12)) |
nvgpu_aperture_mask(g, &runlist->mem[new_buf],
fifo_runlist_base_target_sys_mem_ncoh_f(),
fifo_runlist_base_target_sys_mem_coh_f(),
fifo_runlist_base_target_vid_mem_f()));
fifo_runlist_base_target_sys_mem_ncoh_f(),
fifo_runlist_base_target_vid_mem_f()));
}

gk20a_writel(g, fifo_runlist_r(),
@@ -3764,9 +3759,8 @@ static int gk20a_fifo_commit_userd(struct channel_gk20a *c)
nvgpu_mem_wr32(g, &c->inst_block,
ram_in_ramfc_w() + ram_fc_userd_w(),
nvgpu_aperture_mask(g, &g->fifo.userd,
pbdma_userd_target_sys_mem_ncoh_f(),
pbdma_userd_target_sys_mem_coh_f(),
pbdma_userd_target_vid_mem_f()) |
pbdma_userd_target_sys_mem_ncoh_f(),
pbdma_userd_target_vid_mem_f()) |
pbdma_userd_addr_f(addr_lo));

nvgpu_mem_wr32(g, &c->inst_block,

@@ -742,14 +742,13 @@ void gr_gk20a_ctx_patch_write(struct gk20a *g,

static u32 fecs_current_ctx_data(struct gk20a *g, struct nvgpu_mem *inst_block)
{
u64 ptr = nvgpu_inst_block_addr(g, inst_block) >>
ram_in_base_shift_v();
u32 ptr = u64_lo32(nvgpu_inst_block_addr(g, inst_block)
>> ram_in_base_shift_v());
u32 aperture = nvgpu_aperture_mask(g, inst_block,
gr_fecs_current_ctx_target_sys_mem_ncoh_f(),
gr_fecs_current_ctx_target_sys_mem_coh_f(),
gr_fecs_current_ctx_target_vid_mem_f());
gr_fecs_current_ctx_target_sys_mem_ncoh_f(),
gr_fecs_current_ctx_target_vid_mem_f());

return gr_fecs_current_ctx_ptr_f(u64_lo32(ptr)) | aperture |
return gr_fecs_current_ctx_ptr_f(ptr) | aperture |
gr_fecs_current_ctx_valid_f(1);
}

@@ -2172,18 +2171,16 @@ void gr_gk20a_load_falcon_bind_instblk(struct gk20a *g)

inst_ptr = nvgpu_inst_block_addr(g, &ucode_info->inst_blk_desc);
gk20a_writel(g, gr_fecs_new_ctx_r(),
gr_fecs_new_ctx_ptr_f(inst_ptr >> 12) |
nvgpu_aperture_mask(g, &ucode_info->inst_blk_desc,
gr_fecs_new_ctx_ptr_f(inst_ptr >> 12) |
nvgpu_aperture_mask(g, &ucode_info->inst_blk_desc,
gr_fecs_new_ctx_target_sys_mem_ncoh_f(),
gr_fecs_new_ctx_target_sys_mem_coh_f(),
gr_fecs_new_ctx_target_vid_mem_f()) |
gr_fecs_new_ctx_valid_m());
gr_fecs_new_ctx_valid_m());

gk20a_writel(g, gr_fecs_arb_ctx_ptr_r(),
gr_fecs_arb_ctx_ptr_ptr_f(inst_ptr >> 12) |
nvgpu_aperture_mask(g, &ucode_info->inst_blk_desc,
gr_fecs_arb_ctx_ptr_ptr_f(inst_ptr >> 12) |
nvgpu_aperture_mask(g, &ucode_info->inst_blk_desc,
gr_fecs_arb_ctx_ptr_target_sys_mem_ncoh_f(),
gr_fecs_arb_ctx_ptr_target_sys_mem_coh_f(),
gr_fecs_arb_ctx_ptr_target_vid_mem_f()));

gk20a_writel(g, gr_fecs_arb_ctx_cmd_r(), 0x7);
@@ -4382,9 +4379,8 @@ static int gk20a_init_gr_setup_hw(struct gk20a *g)

gk20a_writel(g, fb_mmu_debug_wr_r(),
nvgpu_aperture_mask(g, &gr->mmu_wr_mem,
fb_mmu_debug_wr_aperture_sys_mem_ncoh_f(),
fb_mmu_debug_wr_aperture_sys_mem_coh_f(),
fb_mmu_debug_wr_aperture_vid_mem_f()) |
fb_mmu_debug_wr_aperture_sys_mem_ncoh_f(),
fb_mmu_debug_wr_aperture_vid_mem_f()) |
fb_mmu_debug_wr_vol_false_f() |
fb_mmu_debug_wr_addr_f(addr));

@@ -4393,9 +4389,8 @@ static int gk20a_init_gr_setup_hw(struct gk20a *g)

gk20a_writel(g, fb_mmu_debug_rd_r(),
nvgpu_aperture_mask(g, &gr->mmu_rd_mem,
fb_mmu_debug_wr_aperture_sys_mem_ncoh_f(),
fb_mmu_debug_wr_aperture_sys_mem_coh_f(),
fb_mmu_debug_rd_aperture_vid_mem_f()) |
fb_mmu_debug_wr_aperture_sys_mem_ncoh_f(),
fb_mmu_debug_rd_aperture_vid_mem_f()) |
fb_mmu_debug_rd_vol_false_f() |
fb_mmu_debug_rd_addr_f(addr));


@@ -122,9 +122,8 @@ static inline u32 big_valid_pde0_bits(struct gk20a *g,
{
u32 pde0_bits =
nvgpu_aperture_mask(g, pd->mem,
gmmu_pde_aperture_big_sys_mem_ncoh_f(),
gmmu_pde_aperture_big_sys_mem_coh_f(),
gmmu_pde_aperture_big_video_memory_f()) |
gmmu_pde_aperture_big_sys_mem_ncoh_f(),
gmmu_pde_aperture_big_video_memory_f()) |
gmmu_pde_address_big_sys_f(
(u32)(addr >> gmmu_pde_address_shift_v()));

@@ -136,9 +135,8 @@ static inline u32 small_valid_pde1_bits(struct gk20a *g,
{
u32 pde1_bits =
nvgpu_aperture_mask(g, pd->mem,
gmmu_pde_aperture_small_sys_mem_ncoh_f(),
gmmu_pde_aperture_small_sys_mem_coh_f(),
gmmu_pde_aperture_small_video_memory_f()) |
gmmu_pde_aperture_small_sys_mem_ncoh_f(),
gmmu_pde_aperture_small_video_memory_f()) |
gmmu_pde_vol_small_true_f() | /* tbd: why? */
gmmu_pde_address_small_sys_f(
(u32)(addr >> gmmu_pde_address_shift_v()));
@@ -217,7 +215,6 @@ static void __update_pte(struct vm_gk20a *vm,

pte_w[1] = __nvgpu_aperture_mask(g, attrs->aperture,
gmmu_pte_aperture_sys_mem_ncoh_f(),
gmmu_pte_aperture_sys_mem_coh_f(),
gmmu_pte_aperture_video_memory_f()) |
gmmu_pte_kind_f(attrs->kind_v) |
gmmu_pte_comptagline_f((u32)(attrs->ctag >> ctag_shift));
@@ -271,7 +268,7 @@ static void update_gmmu_pte_locked(struct vm_gk20a *vm,
page_size >> 10,
nvgpu_gmmu_perm_str(attrs->rw_flag),
attrs->kind_v,
nvgpu_aperture_str(g, attrs->aperture),
nvgpu_aperture_str(attrs->aperture),
attrs->cacheable ? 'C' : '-',
attrs->sparse ? 'S' : '-',
attrs->priv ? 'P' : '-',
@@ -366,12 +363,11 @@ void gk20a_mm_init_pdb(struct gk20a *g, struct nvgpu_mem *inst_block,
gk20a_dbg_info("pde pa=0x%llx", pdb_addr);

nvgpu_mem_wr32(g, inst_block, ram_in_page_dir_base_lo_w(),
nvgpu_aperture_mask(g, vm->pdb.mem,
ram_in_page_dir_base_target_sys_mem_ncoh_f(),
ram_in_page_dir_base_target_sys_mem_coh_f(),
ram_in_page_dir_base_target_vid_mem_f()) |
ram_in_page_dir_base_vol_true_f() |
ram_in_page_dir_base_lo_f(pdb_addr_lo));
nvgpu_aperture_mask(g, vm->pdb.mem,
ram_in_page_dir_base_target_sys_mem_ncoh_f(),
ram_in_page_dir_base_target_vid_mem_f()) |
ram_in_page_dir_base_vol_true_f() |
ram_in_page_dir_base_lo_f(pdb_addr_lo));

nvgpu_mem_wr32(g, inst_block, ram_in_page_dir_base_hi_w(),
ram_in_page_dir_base_hi_f(pdb_addr_hi));

@@ -41,7 +41,6 @@ u32 gk20a_pramin_enter(struct gk20a *g, struct nvgpu_mem *mem,
u32 lo = (u32)(addr & 0xfffff);
u32 win = nvgpu_aperture_mask(g, mem,
bus_bar0_window_target_sys_mem_noncoherent_f(),
bus_bar0_window_target_sys_mem_coherent_f(),
bus_bar0_window_target_vid_mem_f()) |
bus_bar0_window_base_f(hi);

@@ -25,7 +25,6 @@
#include <nvgpu/timers.h>
#include <nvgpu/bus.h>
#include <nvgpu/mm.h>
#include <nvgpu/enabled.h>

#include "bus_gm20b.h"
#include "gk20a/gk20a.h"
@@ -44,9 +43,8 @@ int gm20b_bus_bar1_bind(struct gk20a *g, struct nvgpu_mem *bar1_inst)

gk20a_writel(g, bus_bar1_block_r(),
nvgpu_aperture_mask(g, bar1_inst,
bus_bar1_block_target_sys_mem_ncoh_f(),
bus_bar1_block_target_sys_mem_coh_f(),
bus_bar1_block_target_vid_mem_f()) |
bus_bar1_block_target_sys_mem_ncoh_f(),
bus_bar1_block_target_vid_mem_f()) |
bus_bar1_block_mode_virtual_f() |
bus_bar1_block_ptr_f(ptr_v));
nvgpu_timeout_init(g, &timeout, 1000, NVGPU_TIMER_RETRY_TIMER);

@@ -32,7 +32,6 @@
#include <nvgpu/atomic.h>
#include <nvgpu/barrier.h>
#include <nvgpu/mm.h>
#include <nvgpu/enabled.h>

#include <nvgpu/hw/gm20b/hw_ccsr_gm20b.h>
#include <nvgpu/hw/gm20b/hw_ram_gm20b.h>
@@ -52,12 +51,11 @@ void channel_gm20b_bind(struct channel_gk20a *c)


gk20a_writel(g, ccsr_channel_inst_r(c->chid),
ccsr_channel_inst_ptr_f(inst_ptr) |
nvgpu_aperture_mask(g, &c->inst_block,
ccsr_channel_inst_target_sys_mem_ncoh_f(),
ccsr_channel_inst_target_sys_mem_coh_f(),
ccsr_channel_inst_target_vid_mem_f()) |
ccsr_channel_inst_bind_true_f());
ccsr_channel_inst_ptr_f(inst_ptr) |
nvgpu_aperture_mask(g, &c->inst_block,
ccsr_channel_inst_target_sys_mem_ncoh_f(),
ccsr_channel_inst_target_vid_mem_f()) |
ccsr_channel_inst_bind_true_f());

gk20a_writel(g, ccsr_channel_r(c->chid),
(gk20a_readl(g, ccsr_channel_r(c->chid)) &

@@ -99,7 +99,6 @@ int bl_bootstrap_sec2(struct nvgpu_pmu *pmu,
nvgpu_inst_block_addr(g, &mm->pmu.inst_block) >> 12) |
pwr_pmu_new_instblk_valid_f(1) |
nvgpu_aperture_mask(g, &mm->pmu.inst_block,
pwr_pmu_new_instblk_target_sys_ncoh_f(),
pwr_pmu_new_instblk_target_sys_coh_f(),
pwr_pmu_new_instblk_target_fb_f()));

@@ -166,7 +165,6 @@ void init_pmu_setup_hw1(struct gk20a *g)
nvgpu_inst_block_addr(g, &mm->pmu.inst_block) >> 12) |
pwr_pmu_new_instblk_valid_f(1) |
nvgpu_aperture_mask(g, &mm->pmu.inst_block,
pwr_pmu_new_instblk_target_sys_ncoh_f(),
pwr_pmu_new_instblk_target_sys_coh_f(),
pwr_pmu_new_instblk_target_fb_f()));


@@ -25,7 +25,6 @@
#include <nvgpu/dma.h>
#include <nvgpu/bug.h>
#include <nvgpu/log2.h>
#include <nvgpu/enabled.h>

#include "fifo_gp10b.h"

@@ -79,9 +78,8 @@ int channel_gp10b_commit_userd(struct channel_gk20a *c)
nvgpu_mem_wr32(g, &c->inst_block,
ram_in_ramfc_w() + ram_fc_userd_w(),
nvgpu_aperture_mask(g, &g->fifo.userd,
pbdma_userd_target_sys_mem_ncoh_f(),
pbdma_userd_target_sys_mem_coh_f(),
pbdma_userd_target_vid_mem_f()) |
pbdma_userd_target_sys_mem_ncoh_f(),
pbdma_userd_target_vid_mem_f()) |
pbdma_userd_addr_f(addr_lo));

nvgpu_mem_wr32(g, &c->inst_block,

@@ -124,9 +124,8 @@ int gp10b_init_bar2_mm_hw_setup(struct gk20a *g)

gk20a_writel(g, bus_bar2_block_r(),
nvgpu_aperture_mask(g, inst_block,
bus_bar2_block_target_sys_mem_ncoh_f(),
bus_bar2_block_target_sys_mem_coh_f(),
bus_bar2_block_target_vid_mem_f()) |
bus_bar2_block_target_sys_mem_ncoh_f(),
bus_bar2_block_target_vid_mem_f()) |
bus_bar2_block_mode_virtual_f() |
bus_bar2_block_ptr_f(inst_pa));

@@ -149,9 +148,8 @@ static void update_gmmu_pde3_locked(struct vm_gk20a *vm,
phys_addr >>= gmmu_new_pde_address_shift_v();

pde_v[0] |= nvgpu_aperture_mask(g, pd->mem,
gmmu_new_pde_aperture_sys_mem_ncoh_f(),
gmmu_new_pde_aperture_sys_mem_coh_f(),
gmmu_new_pde_aperture_video_memory_f());
gmmu_new_pde_aperture_sys_mem_ncoh_f(),
gmmu_new_pde_aperture_video_memory_f());
pde_v[0] |= gmmu_new_pde_address_sys_f(u64_lo32(phys_addr));
pde_v[0] |= gmmu_new_pde_vol_true_f();
pde_v[1] |= phys_addr >> 24;
@@ -196,7 +194,6 @@ static void update_gmmu_pde0_locked(struct vm_gk20a *vm,
gmmu_new_dual_pde_address_small_sys_f(small_addr);
pde_v[2] |= nvgpu_aperture_mask(g, pd->mem,
gmmu_new_dual_pde_aperture_small_sys_mem_ncoh_f(),
gmmu_new_dual_pde_aperture_small_sys_mem_coh_f(),
gmmu_new_dual_pde_aperture_small_video_memory_f());
pde_v[2] |= gmmu_new_dual_pde_vol_small_true_f();
pde_v[3] |= small_addr >> 24;
@@ -207,7 +204,6 @@ static void update_gmmu_pde0_locked(struct vm_gk20a *vm,
pde_v[0] |= gmmu_new_dual_pde_vol_big_true_f();
pde_v[0] |= nvgpu_aperture_mask(g, pd->mem,
gmmu_new_dual_pde_aperture_big_sys_mem_ncoh_f(),
gmmu_new_dual_pde_aperture_big_sys_mem_coh_f(),
gmmu_new_dual_pde_aperture_big_video_memory_f());
pde_v[1] |= big_addr >> 28;
}
@@ -244,10 +240,11 @@ static void __update_pte(struct vm_gk20a *vm,
gmmu_new_pte_address_sys_f(phys_shifted) :
gmmu_new_pte_address_vid_f(phys_shifted);
u32 pte_tgt = __nvgpu_aperture_mask(g,
attrs->aperture,
gmmu_new_pte_aperture_sys_mem_ncoh_f(),
gmmu_new_pte_aperture_sys_mem_coh_f(),
gmmu_new_pte_aperture_video_memory_f());
attrs->aperture,
attrs->coherent ?
gmmu_new_pte_aperture_sys_mem_coh_f() :
gmmu_new_pte_aperture_sys_mem_ncoh_f(),
gmmu_new_pte_aperture_video_memory_f());

pte_w[0] = pte_valid | pte_addr | pte_tgt;

@@ -309,7 +306,7 @@ static void update_gmmu_pte_locked(struct vm_gk20a *vm,
page_size >> 10,
nvgpu_gmmu_perm_str(attrs->rw_flag),
attrs->kind_v,
nvgpu_aperture_str(g, attrs->aperture),
nvgpu_aperture_str(attrs->aperture),
attrs->cacheable ? 'C' : '-',
attrs->sparse ? 'S' : '-',
attrs->priv ? 'P' : '-',
@@ -431,9 +428,8 @@ void gp10b_mm_init_pdb(struct gk20a *g, struct nvgpu_mem *inst_block,

nvgpu_mem_wr32(g, inst_block, ram_in_page_dir_base_lo_w(),
nvgpu_aperture_mask(g, vm->pdb.mem,
ram_in_page_dir_base_target_sys_mem_ncoh_f(),
ram_in_page_dir_base_target_sys_mem_coh_f(),
ram_in_page_dir_base_target_vid_mem_f()) |
ram_in_page_dir_base_target_sys_mem_ncoh_f(),
ram_in_page_dir_base_target_vid_mem_f()) |
ram_in_page_dir_base_vol_true_f() |
ram_in_big_page_size_64kb_f() |
ram_in_page_dir_base_lo_f(pdb_addr_lo) |

@@ -27,10 +27,9 @@
#include <nvgpu/nvgpu_common.h>
#include <nvgpu/kmem.h>
#include <nvgpu/nvgpu_mem.h>
#include <nvgpu/acr/nvgpu_acr.h>
#include <nvgpu/firmware.h>
#include <nvgpu/mm.h>
#include <nvgpu/enabled.h>
#include <nvgpu/acr/nvgpu_acr.h>

#include "gk20a/gk20a.h"
#include "acr_gv11b.h"
@@ -221,9 +220,7 @@ static int bl_bootstrap(struct nvgpu_pmu *pmu,
pwr_pmu_new_instblk_ptr_f(
nvgpu_inst_block_addr(g, &mm->pmu.inst_block) >> 12) |
pwr_pmu_new_instblk_valid_f(1) |
(nvgpu_is_enabled(g, NVGPU_USE_COHERENT_SYSMEM) ?
pwr_pmu_new_instblk_target_sys_coh_f() :
pwr_pmu_new_instblk_target_sys_ncoh_f())) ;
pwr_pmu_new_instblk_target_sys_ncoh_f());

/*copy bootloader interface structure to dmem*/
nvgpu_flcn_copy_to_dmem(pmu->flcn, 0, (u8 *)pbl_desc,

@@ -31,14 +31,14 @@
#include <nvgpu/dma.h>
#include <nvgpu/mm.h>
#include <nvgpu/sizes.h>
#include <nvgpu/enabled.h>
#include <nvgpu/log.h>
#include <nvgpu/bug.h>

#include "gk20a/gk20a.h"
#include "gk20a/css_gr_gk20a.h"
#include "css_gr_gv11b.h"

#include <nvgpu/log.h>
#include <nvgpu/bug.h>

#include <nvgpu/hw/gv11b/hw_perf_gv11b.h>
#include <nvgpu/hw/gv11b/hw_mc_gv11b.h>

@@ -144,7 +144,6 @@ int gv11b_css_hw_enable_snapshot(struct channel_gk20a *ch,
perf_pmasys_mem_block_valid_true_f() |
nvgpu_aperture_mask(g, &g->mm.hwpm.inst_block,
perf_pmasys_mem_block_target_sys_ncoh_f(),
perf_pmasys_mem_block_target_sys_coh_f(),
perf_pmasys_mem_block_target_lfb_f()));



@@ -59,12 +59,11 @@ int gv11b_perfbuf_enable_locked(struct gk20a *g, u64 offset, u32 size)
inst_pa_page = nvgpu_inst_block_addr(g, &mm->perfbuf.inst_block) >> 12;

gk20a_writel(g, perf_pmasys_mem_block_r(),
perf_pmasys_mem_block_base_f(inst_pa_page) |
perf_pmasys_mem_block_valid_true_f() |
nvgpu_aperture_mask(g, &mm->perfbuf.inst_block,
perf_pmasys_mem_block_target_sys_ncoh_f(),
perf_pmasys_mem_block_target_sys_coh_f(),
perf_pmasys_mem_block_target_lfb_f()));
perf_pmasys_mem_block_base_f(inst_pa_page) |
perf_pmasys_mem_block_valid_true_f() |
nvgpu_aperture_mask(g, &mm->perfbuf.inst_block,
perf_pmasys_mem_block_target_sys_ncoh_f(),
perf_pmasys_mem_block_target_lfb_f()));

gk20a_idle(g);
return 0;

@@ -101,14 +101,12 @@ void gv11b_get_ch_runlist_entry(struct channel_gk20a *c, u32 *runlist)
c->runqueue_sel) |
ram_rl_entry_chan_userd_target_f(
nvgpu_aperture_mask(g, &g->fifo.userd,
ram_rl_entry_chan_userd_target_sys_mem_ncoh_v(),
ram_rl_entry_chan_userd_target_sys_mem_coh_v(),
ram_rl_entry_chan_userd_target_vid_mem_v())) |
ram_rl_entry_chan_userd_target_sys_mem_ncoh_v(),
ram_rl_entry_chan_userd_target_vid_mem_v())) |
ram_rl_entry_chan_inst_target_f(
nvgpu_aperture_mask(g, &c->inst_block,
ram_rl_entry_chan_inst_target_sys_mem_ncoh_v(),
ram_rl_entry_chan_inst_target_sys_mem_coh_v(),
ram_rl_entry_chan_inst_target_vid_mem_v()));
ram_rl_entry_chan_inst_target_sys_mem_ncoh_v(),
ram_rl_entry_chan_inst_target_vid_mem_v()));

addr_lo = u64_lo32(c->userd_iova) >>
ram_rl_entry_chan_userd_ptr_align_shift_v();

@@ -26,7 +26,6 @@
#include <nvgpu/dma.h>
#include <nvgpu/log.h>
#include <nvgpu/mm.h>
#include <nvgpu/enabled.h>

#include "gk20a/gk20a.h"
#include "gk20a/mm_gk20a.h"
@@ -293,9 +292,8 @@ int gv11b_init_bar2_mm_hw_setup(struct gk20a *g)

gk20a_writel(g, bus_bar2_block_r(),
nvgpu_aperture_mask(g, inst_block,
bus_bar2_block_target_sys_mem_ncoh_f(),
bus_bar2_block_target_sys_mem_coh_f(),
bus_bar2_block_target_vid_mem_f()) |
bus_bar2_block_target_sys_mem_ncoh_f(),
bus_bar2_block_target_vid_mem_f()) |
bus_bar2_block_mode_virtual_f() |
bus_bar2_block_ptr_f(inst_pa));


@@ -195,11 +195,9 @@ int gv11b_pmu_bootstrap(struct nvgpu_pmu *pmu)

gk20a_writel(g, pwr_pmu_new_instblk_r(),
pwr_pmu_new_instblk_ptr_f(
nvgpu_inst_block_addr(g, &mm->pmu.inst_block) >> ALIGN_4KB) |
pwr_pmu_new_instblk_valid_f(1) |
(nvgpu_is_enabled(g, NVGPU_USE_COHERENT_SYSMEM) ?
pwr_pmu_new_instblk_target_sys_coh_f() :
pwr_pmu_new_instblk_target_sys_ncoh_f()));
nvgpu_inst_block_addr(g, &mm->pmu.inst_block) >> ALIGN_4KB)
| pwr_pmu_new_instblk_valid_f(1)
| pwr_pmu_new_instblk_target_sys_ncoh_f());

/* TBD: load all other surfaces */
g->ops.pmu_ver.set_pmu_cmdline_args_trace_size(

@@ -177,9 +177,8 @@ void gv11b_subctx_commit_pdb(struct vm_gk20a *vm,
u32 pdb_addr_lo, pdb_addr_hi;
u64 pdb_addr;
u32 aperture = nvgpu_aperture_mask(g, vm->pdb.mem,
ram_in_sc_page_dir_base_target_sys_mem_ncoh_v(),
ram_in_sc_page_dir_base_target_sys_mem_coh_v(),
ram_in_sc_page_dir_base_target_vid_mem_v());
ram_in_sc_page_dir_base_target_sys_mem_ncoh_v(),
ram_in_sc_page_dir_base_target_vid_mem_v());

pdb_addr = nvgpu_mem_get_addr(g, vm->pdb.mem);
pdb_addr_lo = u64_lo32(pdb_addr >> ram_in_base_shift_v());

@@ -75,8 +75,8 @@ struct gk20a;
#define NVGPU_SUPPORT_MAP_DIRECT_KIND_CTRL 24
/* Support batch mapping */
#define NVGPU_SUPPORT_MAP_BUFFER_BATCH 25
/* Use coherent aperture for sysmem. */
#define NVGPU_USE_COHERENT_SYSMEM 26
/* Support DMA coherence */
#define NVGPU_DMA_COHERENT 26
/* Use physical scatter tables instead of IOMMU */
#define NVGPU_MM_USE_PHYSICAL_SG 27


@@ -25,7 +25,6 @@

#include <nvgpu/types.h>
#include <nvgpu/list.h>
#include <nvgpu/enabled.h>

#ifdef __KERNEL__
#include <nvgpu/linux/nvgpu_mem.h>
@@ -52,10 +51,6 @@ struct nvgpu_page_alloc;
enum nvgpu_aperture {
APERTURE_INVALID = 0, /* unallocated or N/A */
APERTURE_SYSMEM,

/* Don't use directly. Use APERTURE_SYSMEM, this is used internally. */
__APERTURE_SYSMEM_COH,

APERTURE_VIDMEM
};

@@ -192,18 +187,12 @@ nvgpu_mem_from_clear_list_entry(struct nvgpu_list_node *node)
clear_list_entry));
};

static inline const char *nvgpu_aperture_str(struct gk20a *g,
enum nvgpu_aperture aperture)
static inline const char *nvgpu_aperture_str(enum nvgpu_aperture aperture)
{
switch (aperture) {
case APERTURE_INVALID:
return "INVAL";
case APERTURE_SYSMEM:
return "SYSMEM";
case __APERTURE_SYSMEM_COH:
return "SYSCOH";
case APERTURE_VIDMEM:
return "VIDMEM";
case APERTURE_INVALID: return "INVAL";
case APERTURE_SYSMEM: return "SYSMEM";
case APERTURE_VIDMEM: return "VIDMEM";
};
return "UNKNOWN";
}
@@ -333,9 +322,9 @@ u64 nvgpu_mem_get_addr(struct gk20a *g, struct nvgpu_mem *mem);
u64 nvgpu_mem_get_phys_addr(struct gk20a *g, struct nvgpu_mem *mem);

u32 __nvgpu_aperture_mask(struct gk20a *g, enum nvgpu_aperture aperture,
u32 sysmem_mask, u32 sysmem_coh_mask, u32 vidmem_mask);
u32 sysmem_mask, u32 vidmem_mask);
u32 nvgpu_aperture_mask(struct gk20a *g, struct nvgpu_mem *mem,
u32 sysmem_mask, u32 sysmem_coh_mask, u32 vidmem_mask);
u32 sysmem_mask, u32 vidmem_mask);

u64 nvgpu_mem_iommu_translate(struct gk20a *g, u64 phys);