gpu: nvgpu: Remove alloc_inst_block from mm HAL
The alloc_inst_block() function in the MM HAL is not a HAL. It does
not abstract any HW accesses; instead it just wraps a DMA allocation.
As such, remove it from the HAL and move the single gk20a
implementation to common/mm/mm.c as nvgpu_alloc_inst_block().

JIRA NVGPU-2042

Change-Id: I0a586800a11cd230ca43b85f94a35de107f5d1e1
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/2109049
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
commit c71e764348
parent a68f66d307
committed by mobile promotions
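At its core the change is mechanical: every call through the per-chip ops table becomes a direct call into common code. A minimal sketch of the call-site pattern, condensed from the hunks below:

	/* Before: dispatched through the MM HAL, even though every chip
	 * shared the same gk20a implementation. */
	err = g->ops.mm.alloc_inst_block(g, &ch->inst_block);

	/* After: a direct call into common/mm/mm.c. */
	err = nvgpu_alloc_inst_block(g, &ch->inst_block);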
@@ -24,6 +24,7 @@
 #include <trace/events/gk20a.h>
 
+#include <nvgpu/mm.h>
 #include <nvgpu/semaphore.h>
 #include <nvgpu/timers.h>
 #include <nvgpu/kmem.h>
@@ -2503,7 +2504,7 @@ int nvgpu_channel_alloc_inst(struct gk20a *g, struct channel_gk20a *ch)
 
 	nvgpu_log_fn(g, " ");
 
-	err = g->ops.mm.alloc_inst_block(g, &ch->inst_block);
+	err = nvgpu_alloc_inst_block(g, &ch->inst_block);
 	if (err != 0) {
 		return err;
 	}
@@ -202,7 +202,7 @@ static int nvgpu_gr_falcon_init_ctxsw_ucode_vaspace(struct gk20a *g,
 	struct nvgpu_ctxsw_ucode_info *ucode_info = &falcon->ctxsw_ucode_info;
 	int err;
 
-	err = g->ops.mm.alloc_inst_block(g, &ucode_info->inst_blk_desc);
+	err = nvgpu_alloc_inst_block(g, &ucode_info->inst_blk_desc);
 	if (err != 0) {
 		return err;
 	}
@@ -168,6 +168,22 @@ void nvgpu_free_inst_block(struct gk20a *g, struct nvgpu_mem *inst_block)
 	}
 }
 
+int nvgpu_alloc_inst_block(struct gk20a *g, struct nvgpu_mem *inst_block)
+{
+	int err;
+
+	nvgpu_log_fn(g, " ");
+
+	err = nvgpu_dma_alloc(g, g->ops.ramin.alloc_size(), inst_block);
+	if (err != 0) {
+		nvgpu_err(g, "%s: memory allocation failed", __func__);
+		return err;
+	}
+
+	nvgpu_log_fn(g, "done");
+	return 0;
+}
+
 static int nvgpu_alloc_sysmem_flush(struct gk20a *g)
 {
 	return nvgpu_dma_alloc_sys(g, SZ_4K, &g->mm.sysmem_flush);
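With the function in common code, the instance-block lifecycle splits cleanly between common helpers and the remaining HAL. A condensed sketch of the pattern the callers in this change follow (error handling trimmed; inst_block, vm, and big_page_size stand in for whatever the caller owns):

	struct nvgpu_mem inst_block;
	int err;

	/* Chip-independent: allocate the DMA backing memory. */
	err = nvgpu_alloc_inst_block(g, &inst_block);
	if (err != 0) {
		return err;
	}

	/* Chip-specific: program the block's HW-format fields. */
	g->ops.mm.init_inst_block(&inst_block, vm, big_page_size);

	/* ... use the instance block ... */

	nvgpu_free_inst_block(g, &inst_block);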
@@ -269,7 +285,7 @@ static int nvgpu_init_system_vm(struct mm_gk20a *mm)
 		return -ENOMEM;
 	}
 
-	err = g->ops.mm.alloc_inst_block(g, inst_block);
+	err = nvgpu_alloc_inst_block(g, inst_block);
 	if (err != 0) {
 		goto clean_up_vm;
 	}
@@ -288,7 +304,7 @@ static int nvgpu_init_hwpm(struct mm_gk20a *mm)
 	struct gk20a *g = gk20a_from_mm(mm);
 	struct nvgpu_mem *inst_block = &mm->hwpm.inst_block;
 
-	err = g->ops.mm.alloc_inst_block(g, inst_block);
+	err = nvgpu_alloc_inst_block(g, inst_block);
 	if (err != 0) {
 		return err;
 	}
@@ -411,7 +427,7 @@ static int nvgpu_init_bar1_vm(struct mm_gk20a *mm)
 		return -ENOMEM;
 	}
 
-	err = g->ops.mm.alloc_inst_block(g, inst_block);
+	err = nvgpu_alloc_inst_block(g, inst_block);
 	if (err != 0) {
 		goto clean_up_vm;
 	}
@@ -444,7 +460,7 @@ static int nvgpu_init_engine_ucode_vm(struct gk20a *g,
 	}
 
 	/* allocate instance mem for engine ucode */
-	err = g->ops.mm.alloc_inst_block(g, inst_block);
+	err = nvgpu_alloc_inst_block(g, inst_block);
 	if (err != 0) {
 		goto clean_up_va;
 	}
@@ -20,6 +20,7 @@
  * DEALINGS IN THE SOFTWARE.
  */
 
+#include <nvgpu/mm.h>
 #include <nvgpu/sizes.h>
 #include <nvgpu/perfbuf.h>
 #include <nvgpu/gk20a.h>
@@ -35,7 +36,7 @@ int nvgpu_perfbuf_enable_locked(struct gk20a *g, u64 offset, u32 size)
 		return err;
 	}
 
-	err = g->ops.mm.alloc_inst_block(g, &mm->perfbuf.inst_block);
+	err = nvgpu_alloc_inst_block(g, &mm->perfbuf.inst_block);
 	if (err != 0) {
 		return err;
 	}
@@ -100,22 +100,6 @@ void gk20a_init_inst_block(struct nvgpu_mem *inst_block, struct vm_gk20a *vm,
 	}
 }
 
-int gk20a_alloc_inst_block(struct gk20a *g, struct nvgpu_mem *inst_block)
-{
-	int err;
-
-	nvgpu_log_fn(g, " ");
-
-	err = nvgpu_dma_alloc(g, g->ops.ramin.alloc_size(), inst_block);
-	if (err != 0) {
-		nvgpu_err(g, "%s: memory allocation failed", __func__);
-		return err;
-	}
-
-	nvgpu_log_fn(g, "done");
-	return 0;
-}
-
 u32 gk20a_mm_get_iommu_bit(struct gk20a *g)
 {
 	return 34;
@@ -53,7 +53,7 @@ int gp10b_init_bar2_vm(struct gk20a *g)
 	}
 
 	/* allocate instance mem for bar2 */
-	err = g->ops.mm.alloc_inst_block(g, inst_block);
+	err = nvgpu_alloc_inst_block(g, inst_block);
 	if (err != 0) {
 		goto clean_up_va;
 	}
@@ -852,7 +852,6 @@ static const struct gpu_ops gm20b_ops = {
 		.vm_bind_channel = nvgpu_vm_bind_channel,
 		.setup_hw = nvgpu_mm_setup_hw,
 		.is_bar1_supported = gm20b_mm_is_bar1_supported,
-		.alloc_inst_block = gk20a_alloc_inst_block,
 		.init_inst_block = gk20a_init_inst_block,
 		.bar1_map_userd = gk20a_mm_bar1_map_userd,
 		.cache = {
@@ -929,7 +929,6 @@ static const struct gpu_ops gp10b_ops = {
 		.vm_bind_channel = nvgpu_vm_bind_channel,
 		.setup_hw = nvgpu_mm_setup_hw,
 		.is_bar1_supported = gm20b_mm_is_bar1_supported,
-		.alloc_inst_block = gk20a_alloc_inst_block,
 		.init_inst_block = gk20a_init_inst_block,
 		.init_bar2_vm = gp10b_init_bar2_vm,
 		.remove_bar2_vm = gp10b_remove_bar2_vm,
@@ -1125,7 +1125,6 @@ static const struct gpu_ops gv100_ops = {
 		.vm_bind_channel = nvgpu_vm_bind_channel,
 		.setup_hw = nvgpu_mm_setup_hw,
 		.is_bar1_supported = gv11b_mm_is_bar1_supported,
-		.alloc_inst_block = gk20a_alloc_inst_block,
 		.init_inst_block = gv11b_init_inst_block,
 		.init_bar2_vm = gp10b_init_bar2_vm,
 		.remove_bar2_vm = gp10b_remove_bar2_vm,
@@ -1095,7 +1095,6 @@ static const struct gpu_ops gv11b_ops = {
 		.vm_bind_channel = nvgpu_vm_bind_channel,
 		.setup_hw = nvgpu_mm_setup_hw,
 		.is_bar1_supported = gv11b_mm_is_bar1_supported,
-		.alloc_inst_block = gk20a_alloc_inst_block,
 		.init_inst_block = gv11b_init_inst_block,
 		.init_bar2_vm = gp10b_init_bar2_vm,
 		.remove_bar2_vm = gp10b_remove_bar2_vm,
@@ -1162,7 +1162,6 @@ static const struct gpu_ops tu104_ops = {
 		.vm_bind_channel = nvgpu_vm_bind_channel,
 		.setup_hw = nvgpu_mm_setup_hw,
 		.is_bar1_supported = gv11b_mm_is_bar1_supported,
-		.alloc_inst_block = gk20a_alloc_inst_block,
 		.init_inst_block = gv11b_init_inst_block,
 		.init_bar2_vm = gp10b_init_bar2_vm,
 		.remove_bar2_vm = gp10b_remove_bar2_vm,
@@ -1263,8 +1263,6 @@ struct gpu_ops {
 		bool (*is_bar1_supported)(struct gk20a *g);
 		int (*init_bar2_vm)(struct gk20a *g);
 		void (*remove_bar2_vm)(struct gk20a *g);
-		int (*alloc_inst_block)(struct gk20a *g,
-				struct nvgpu_mem *inst_block);
 		void (*init_inst_block)(struct nvgpu_mem *inst_block,
 				struct vm_gk20a *vm, u32 big_page_size);
 		void (*fault_info_mem_destroy)(struct gk20a *g);
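This deletion also illustrates the criterion the commit message applies: init_inst_block keeps its slot because its implementation genuinely varies by chip, which is visible in the ops tables above:

	/* From the tables above: per-chip variance justifies a HAL entry. */
	.init_inst_block = gk20a_init_inst_block,   /* gm20b, gp10b */
	.init_inst_block = gv11b_init_inst_block,   /* gv100, gv11b, tu104 */

alloc_inst_block, by contrast, had the single gk20a implementation on every chip, so it carried no per-chip behavior worth dispatching.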
@@ -182,6 +182,7 @@ void nvgpu_init_mm_ce_context(struct gk20a *g);
 int nvgpu_init_mm_support(struct gk20a *g);
 int nvgpu_init_mm_setup_hw(struct gk20a *g);
 
+int nvgpu_alloc_inst_block(struct gk20a *g, struct nvgpu_mem *inst_block);
 u64 nvgpu_inst_block_addr(struct gk20a *g, struct nvgpu_mem *inst_block);
 u32 nvgpu_inst_block_ptr(struct gk20a *g, struct nvgpu_mem *inst_block);
 void nvgpu_free_inst_block(struct gk20a *g, struct nvgpu_mem *inst_block);
@@ -24,7 +24,6 @@ gv11b_fb_write_mmu_fault_buffer_size
 find_first_bit
 find_first_zero_bit
 find_next_bit
-gk20a_alloc_inst_block
 gk20a_bus_set_bar0_window
 gk20a_runlist_get_ch_entry
 gk20a_runlist_get_tsg_entry
@@ -64,6 +63,7 @@ nvgpu_alloc_destroy
 nvgpu_alloc_end
 nvgpu_alloc_fixed
 nvgpu_alloc_initialized
+nvgpu_alloc_inst_block
 nvgpu_alloc_length
 nvgpu_alloc_pte
 nvgpu_alloc_release_carveout
@@ -299,7 +299,6 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
 	g->ops.mm.gmmu.get_default_big_page_size =
 					gp10b_mm_get_default_big_page_size;
 	g->ops.mm.gmmu.get_mmu_levels = gp10b_mm_get_mmu_levels;
-	g->ops.mm.alloc_inst_block = gk20a_alloc_inst_block;
 	g->ops.mm.init_inst_block = gv11b_init_inst_block;
 	g->ops.mm.gmmu.map = nvgpu_gmmu_map_locked;
 	g->ops.mm.gmmu.unmap = nvgpu_gmmu_unmap_locked;
@@ -121,7 +121,6 @@ static int init_mm(struct unit_module *m, struct gk20a *g)
 	g->ops.mm.gmmu.get_default_big_page_size =
 					gp10b_mm_get_default_big_page_size;
 	g->ops.mm.gmmu.get_mmu_levels = gp10b_mm_get_mmu_levels;
-	g->ops.mm.alloc_inst_block = gk20a_alloc_inst_block;
 	g->ops.mm.init_inst_block = gv11b_init_inst_block;
 	g->ops.mm.gmmu.map = nvgpu_gmmu_map_locked;
 	g->ops.mm.gmmu.unmap = nvgpu_gmmu_unmap_locked;
@@ -312,10 +311,10 @@ static int test_page_faults_disable_hw(struct unit_module *m, struct gk20a *g,
 /*
  * Test: test_page_faults_inst_block.
  * This test supports 3 types of scenario to cover corner cases:
- * 0 (default): regular alloc_inst_block with default values
- * 1: alloc_inst_block with large page size
- * 2: alloc_inst_block with large page size and set_big_page_size set to NULL to
- *    test a corner case in gv11b_init_inst_block (branch coverage)
+ * 0 (default): regular nvgpu_alloc_inst_block with default values
+ * 1: nvgpu_alloc_inst_block with large page size
+ * 2: nvgpu_alloc_inst_block with large page size and set_big_page_size set to
+ *    NULL to test a corner case in gv11b_init_inst_block (branch coverage)
  */
 static int test_page_faults_inst_block(struct unit_module *m, struct gk20a *g,
 		void *args)
@@ -334,8 +333,8 @@ static int test_page_faults_inst_block(struct unit_module *m, struct gk20a *g,
 		g->ops.ramin.set_big_page_size = NULL;
 	}
 
-	if (g->ops.mm.alloc_inst_block(g, &inst_blk_desc) != 0) {
-		unit_return_fail(m, "alloc_inst_block failed\n");
+	if (nvgpu_alloc_inst_block(g, &inst_blk_desc) != 0) {
+		unit_return_fail(m, "nvgpu_alloc_inst_block failed\n");
 	}
 
 	g->ops.mm.init_inst_block(&inst_blk_desc, g->mm.bar2.vm, big_page_size);