Mirror of git://nv-tegra.nvidia.com/linux-nvgpu.git
Synced 2025-12-23 09:57:08 +03:00
gpu: nvgpu: Use nvgpu_mem instead of custom sgt
Use an nvgpu_mem created from a page in the ACR code instead of a
scatter-gather table created by the Linux SGT APIs. The ACR code needs
the GPU to map a physical page queried from an external API (other than
the regular DMA API).

Note that this code used to explicitly mark the SGT it creates as
bypassing the SMMU. That is now taken care of implicitly by the
__nvgpu_mem_create_from_pages() function.

JIRA NVGPU-12
JIRA NVGPU-30

Change-Id: Ie40152a7611e985e1b97ac2ddc7e27664b71917c
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: http://git-master/r/1464082
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Committed by: mobile promotions
Parent: f6c92780ff
Commit: c3817a56b7
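For context, the change distills to the following pattern: wrap the WPR
physical page in an nvgpu_mem and map that, instead of hand-building an
sg_table. This is a minimal sketch assembled only from identifiers
visible in the diff below; the helper name map_wpr_page() and its error
handling are illustrative additions, not part of the commit.

/*
 * Sketch of the new mapping pattern, assuming the kernel-internal
 * nvgpu APIs shown in the diff below. map_wpr_page() is a
 * hypothetical wrapper for illustration.
 */
#include <nvgpu/nvgpu_mem.h>

static int map_wpr_page(struct gk20a *g, struct vm_gk20a *vm,
			phys_addr_t wpr_addr, u32 wprsize)
{
	struct page *page = phys_to_page(wpr_addr);
	int err;

	/*
	 * Wrap the physical page in an nvgpu_mem. Per the commit
	 * message, this also marks the mapping as bypassing the SMMU,
	 * which the old code did by zeroing sg_dma_address() on a
	 * hand-built sg_table.
	 */
	err = __nvgpu_mem_create_from_pages(g, &g->pmu.wpr_buf, &page, 1);
	if (err)
		return err;

	/* Map through the GMMU using the SGT carried by the nvgpu_mem. */
	g->pmu.wpr_buf.gpu_va = gk20a_gmmu_map(vm, &g->pmu.wpr_buf.priv.sgt,
					       wprsize, 0, gk20a_mem_flag_none,
					       false, APERTURE_SYSMEM);
	return g->pmu.wpr_buf.gpu_va ? 0 : -ENOMEM;
}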
@@ -22,6 +22,7 @@
 #include <nvgpu/timers.h>
 #include <nvgpu/nvgpu_common.h>
 #include <nvgpu/kmem.h>
+#include <nvgpu/nvgpu_mem.h>
 #include <nvgpu/acr/nvgpu_acr.h>
 #include <nvgpu/firmware.h>
 
@@ -386,7 +387,6 @@ int prepare_ucode_blob(struct gk20a *g)
 	struct mm_gk20a *mm = &g->mm;
 	struct vm_gk20a *vm = &mm->pmu.vm;
 	struct wpr_carveout_info wpr_inf;
-	struct sg_table *sgt;
 	struct page *page;
 
 	if (g->acr.ucode_blob.cpu_va) {
@@ -411,24 +411,11 @@ int prepare_ucode_blob(struct gk20a *g)
 	gm20b_dbg_pmu("wpr carveout base:%llx\n", wpr_inf.wpr_base);
 	gm20b_dbg_pmu("wpr carveout size :%x\n", wprsize);
 
-	sgt = nvgpu_kzalloc(g, sizeof(*sgt));
-	if (!sgt) {
-		nvgpu_err(g, "failed to allocate memory");
-		return -ENOMEM;
-	}
-	err = sg_alloc_table(sgt, 1, GFP_KERNEL);
-	if (err) {
-		nvgpu_err(g, "failed to allocate sg_table");
-		goto free_sgt;
-	}
 	page = phys_to_page(wpr_addr);
-	sg_set_page(sgt->sgl, page, wprsize, 0);
-	/* This bypasses SMMU for WPR during gmmu_map. */
-	sg_dma_address(sgt->sgl) = 0;
-
-	g->pmu.wpr_buf.gpu_va = gk20a_gmmu_map(vm, &sgt, wprsize,
-					0, gk20a_mem_flag_none, false,
-					APERTURE_SYSMEM);
+	__nvgpu_mem_create_from_pages(g, &g->pmu.wpr_buf, &page, 1);
+	g->pmu.wpr_buf.gpu_va = gk20a_gmmu_map(vm, &g->pmu.wpr_buf.priv.sgt,
+					       wprsize, 0, gk20a_mem_flag_none,
+					       false, APERTURE_SYSMEM);
 	gm20b_dbg_pmu("wpr mapped gpu va :%llx\n", g->pmu.wpr_buf.gpu_va);
 
 	/* Discover all managed falcons*/
@@ -458,7 +445,8 @@ int prepare_ucode_blob(struct gk20a *g)
 	gm20b_dbg_pmu("prepare ucode blob return 0\n");
 	free_acr_resources(g, plsfm);
-free_sgt:
-	nvgpu_free_sgtable(g, &sgt);
+	gk20a_gmmu_unmap(vm, g->pmu.wpr_buf.gpu_va,
+			 g->pmu.wpr_buf.size, gk20a_mem_flag_none);
+
 	return err;
 }
 