gpu: nvgpu: use API to check for valid memory

Use the nvgpu_mem_is_valid() API to check whether buffers are allocated,
instead of directly accessing the Linux-specific sg_table.

Jira NVGPU-416

Change-Id: I83da79f4a57ec5a765f32c69bf76e708753e11fb
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1604587
Reviewed-by: Automatic_Commit_Validation_User
GVS: Gerrit_Virtual_Submit
Reviewed-by: Konsta Holtta <kholtta@nvidia.com>
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Commit: 5b41eb839a (parent: 75ebe51113)
Author: Deepak Nibade
Date: 2017-11-24 07:05:40 -08:00
Committed-by: mobile promotions

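For context on the hunks below: the point of the change is that common driver
code should ask an nvgpu_mem whether it is allocated, rather than dereference
the Linux-specific priv.sgt backing. The following is a minimal, self-contained
sketch of that pattern; the type, enum, and helper names (sketch_mem,
sketch_aperture, sketch_mem_is_valid) are hypothetical stand-ins for
illustration only and are not the real definitions from nvgpu's nvgpu_mem
header.

/*
 * Sketch of an OS-agnostic "is this buffer allocated?" check.
 * Simplified stand-in types, not the real nvgpu definitions.
 */
#include <stdbool.h>
#include <stddef.h>

enum sketch_aperture {
	SKETCH_APERTURE_INVALID = 0,	/* buffer has not been allocated */
	SKETCH_APERTURE_SYSMEM,		/* backed by system memory */
	SKETCH_APERTURE_VIDMEM,		/* backed by video memory */
};

struct sketch_mem {
	enum sketch_aperture aperture;	/* set when the buffer is allocated */
	size_t size;
	/*
	 * Any OS-specific backing (e.g. a Linux sg_table) would hang off a
	 * private member here; common code should never touch it directly.
	 */
};

/*
 * Analogue of nvgpu_mem_is_valid(): report whether the buffer has been
 * allocated without peeking at any OS-specific backing structure.
 */
static inline bool sketch_mem_is_valid(const struct sketch_mem *mem)
{
	return mem->aperture != SKETCH_APERTURE_INVALID;
}

With a helper of this shape, the VPR call sites below reduce to a single
"c->vpr && nvgpu_mem_is_valid(...)" test, and the patch-context allocation
check becomes "!nvgpu_mem_is_valid(...)", which is what the diff does.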

@@ -2613,13 +2613,13 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
 	gk20a_dbg_fn("");
 
 	/* Circular Buffer */
-	if (!c->vpr ||
-	    (gr->global_ctx_buffer[CIRCULAR_VPR].mem.priv.sgt == NULL)) {
-		mem = &gr->global_ctx_buffer[CIRCULAR].mem;
-		g_bfr_index[CIRCULAR_VA] = CIRCULAR;
-	} else {
+	if (c->vpr &&
+	    nvgpu_mem_is_valid(&gr->global_ctx_buffer[CIRCULAR_VPR].mem)) {
 		mem = &gr->global_ctx_buffer[CIRCULAR_VPR].mem;
 		g_bfr_index[CIRCULAR_VA] = CIRCULAR_VPR;
+	} else {
+		mem = &gr->global_ctx_buffer[CIRCULAR].mem;
+		g_bfr_index[CIRCULAR_VA] = CIRCULAR;
 	}
 
 	gpu_va = nvgpu_gmmu_map(ch_vm, mem, mem->size,
@@ -2631,13 +2631,13 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
 	g_bfr_size[CIRCULAR_VA] = mem->size;
 
 	/* Attribute Buffer */
-	if (!c->vpr ||
-	    (gr->global_ctx_buffer[ATTRIBUTE_VPR].mem.priv.sgt == NULL)) {
-		mem = &gr->global_ctx_buffer[ATTRIBUTE].mem;
-		g_bfr_index[ATTRIBUTE_VA] = ATTRIBUTE;
-	} else {
+	if (c->vpr &&
+	    nvgpu_mem_is_valid(&gr->global_ctx_buffer[ATTRIBUTE_VPR].mem)) {
 		mem = &gr->global_ctx_buffer[ATTRIBUTE_VPR].mem;
 		g_bfr_index[ATTRIBUTE_VA] = ATTRIBUTE_VPR;
+	} else {
+		mem = &gr->global_ctx_buffer[ATTRIBUTE].mem;
+		g_bfr_index[ATTRIBUTE_VA] = ATTRIBUTE;
 	}
 
 	gpu_va = nvgpu_gmmu_map(ch_vm, mem, mem->size,
@@ -2649,13 +2649,13 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
 	g_bfr_size[ATTRIBUTE_VA] = mem->size;
 
 	/* Page Pool */
-	if (!c->vpr ||
-	    (gr->global_ctx_buffer[PAGEPOOL_VPR].mem.priv.sgt == NULL)) {
-		mem = &gr->global_ctx_buffer[PAGEPOOL].mem;
-		g_bfr_index[PAGEPOOL_VA] = PAGEPOOL;
-	} else {
+	if (c->vpr &&
+	    nvgpu_mem_is_valid(&gr->global_ctx_buffer[PAGEPOOL_VPR].mem)) {
 		mem = &gr->global_ctx_buffer[PAGEPOOL_VPR].mem;
 		g_bfr_index[PAGEPOOL_VA] = PAGEPOOL_VPR;
+	} else {
+		mem = &gr->global_ctx_buffer[PAGEPOOL].mem;
+		g_bfr_index[PAGEPOOL_VA] = PAGEPOOL;
 	}
 
 	gpu_va = nvgpu_gmmu_map(ch_vm, mem, mem->size,
@@ -2960,7 +2960,7 @@ int gk20a_alloc_obj_ctx(struct channel_gk20a *c, u32 class_num, u32 flags)
 	}
 
 	/* allocate patch buffer */
-	if (ch_ctx->patch_ctx.mem.priv.sgt == NULL) {
+	if (!nvgpu_mem_is_valid(&ch_ctx->patch_ctx.mem)) {
 		ch_ctx->patch_ctx.data_count = 0;
 		err = gr_gk20a_alloc_channel_patch_ctx(g, c);
 		if (err) {