gpu: nvgpu: use nvgpu list for CE2 ctx list

Use nvgpu list APIs instead of Linux list APIs
to store CE2 contexts

Jira NVGPU-13

Change-Id: I0c9b8b69e7e19a63265802abb4455a5cb2308b6f
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: http://git-master/r/1454011
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Authored by Deepak Nibade on 2017-03-31 16:37:30 +05:30; committed by mobile promotions
parent 42852f182a
commit f0147665b2
2 changed files with 22 additions and 15 deletions
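For reference, a minimal sketch of the nvgpu list pattern this change adopts, built around a hypothetical struct foo. The calls and the iterator's (type, member) arguments follow the usage visible in the diff below; the include path and the shape of the from_list helper mirror the header change, but treat this as an illustrative sketch rather than canonical nvgpu API documentation:

#include <nvgpu/list.h>	/* assumed include path for the nvgpu list API */

struct foo {			/* hypothetical type embedding a list node */
	int val;
	struct nvgpu_list_node list;
};

/*
 * nvgpu's iterators take the embedding type by name and resolve
 * entries via <type>_from_<member>(), instead of container_of()
 * as the kernel's list_for_each_entry*() does; each embedding
 * type therefore defines a helper like this one.
 */
static inline struct foo *foo_from_list(struct nvgpu_list_node *node)
{
	return (struct foo *)
		((uintptr_t)node - offsetof(struct foo, list));
}

static void foo_example(struct foo *a, struct foo *b)
{
	struct nvgpu_list_node head;
	struct foo *pos, *tmp;

	nvgpu_init_list_node(&head);		/* replaces INIT_LIST_HEAD() */
	nvgpu_list_add(&a->list, &head);	/* replaces list_add() */
	nvgpu_list_add(&b->list, &head);

	/* the _safe variant caches the next node, so pos may be unlinked */
	nvgpu_list_for_each_entry_safe(pos, tmp, &head, foo, list) {
		nvgpu_list_del(&pos->list);	/* replaces list_del() */
	}
}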


@@ -110,8 +110,8 @@ static void gk20a_ce_notify_all_user(struct gk20a *g, u32 event)
 	nvgpu_mutex_acquire(&ce_app->app_mutex);
-	list_for_each_entry_safe(ce_ctx, ce_ctx_save,
-			&ce_app->allocated_contexts, list) {
+	nvgpu_list_for_each_entry_safe(ce_ctx, ce_ctx_save,
+			&ce_app->allocated_contexts, gk20a_gpu_ctx, list) {
 		if (ce_ctx->user_event_callback) {
 			ce_ctx->user_event_callback(ce_ctx->ctx_id,
 				event);
@@ -187,7 +187,7 @@ static void gk20a_ce_free_command_buffer_stored_fence(struct gk20a_gpu_ctx *ce_c
 /* assume this api should need to call under nvgpu_mutex_acquire(&ce_app->app_mutex) */
 static void gk20a_ce_delete_gpu_context(struct gk20a_gpu_ctx *ce_ctx)
 {
-	struct list_head *list = &ce_ctx->list;
+	struct nvgpu_list_node *list = &ce_ctx->list;

 	ce_ctx->gpu_ctx_state = NVGPU_CE_GPU_CTX_DELETED;
@@ -204,7 +204,7 @@ static void gk20a_ce_delete_gpu_context(struct gk20a_gpu_ctx *ce_ctx)
 	/* housekeeping on app */
 	if (list->prev && list->next)
-		list_del(list);
+		nvgpu_list_del(list);

 	nvgpu_mutex_release(&ce_ctx->gpu_ctx_mutex);
 	nvgpu_mutex_destroy(&ce_ctx->gpu_ctx_mutex);
@@ -361,7 +361,7 @@ int gk20a_init_ce_support(struct gk20a *g)
 	nvgpu_mutex_acquire(&ce_app->app_mutex);
-	INIT_LIST_HEAD(&ce_app->allocated_contexts);
+	nvgpu_init_list_node(&ce_app->allocated_contexts);
 	ce_app->ctx_count = 0;
 	ce_app->next_ctx_id = 0;
 	ce_app->initialised = true;
@@ -386,12 +386,12 @@ void gk20a_ce_destroy(struct gk20a *g)
 	nvgpu_mutex_acquire(&ce_app->app_mutex);
-	list_for_each_entry_safe(ce_ctx, ce_ctx_save,
-			&ce_app->allocated_contexts, list) {
+	nvgpu_list_for_each_entry_safe(ce_ctx, ce_ctx_save,
+			&ce_app->allocated_contexts, gk20a_gpu_ctx, list) {
 		gk20a_ce_delete_gpu_context(ce_ctx);
 	}

-	INIT_LIST_HEAD(&ce_app->allocated_contexts);
+	nvgpu_init_list_node(&ce_app->allocated_contexts);
 	ce_app->ctx_count = 0;
 	ce_app->next_ctx_id = 0;
@@ -520,7 +520,7 @@ u32 gk20a_ce_create_context_with_cb(struct device *dev,
 	nvgpu_mutex_acquire(&ce_app->app_mutex);
 	ctx_id = ce_ctx->ctx_id = ce_app->next_ctx_id;
-	list_add(&ce_ctx->list, &ce_app->allocated_contexts);
+	nvgpu_list_add(&ce_ctx->list, &ce_app->allocated_contexts);
 	++ce_app->next_ctx_id;
 	++ce_app->ctx_count;
 	nvgpu_mutex_release(&ce_app->app_mutex);
@@ -570,8 +570,8 @@ int gk20a_ce_execute_ops(struct device *dev,
 	nvgpu_mutex_acquire(&ce_app->app_mutex);
-	list_for_each_entry_safe(ce_ctx, ce_ctx_save,
-			&ce_app->allocated_contexts, list) {
+	nvgpu_list_for_each_entry_safe(ce_ctx, ce_ctx_save,
+			&ce_app->allocated_contexts, gk20a_gpu_ctx, list) {
 		if (ce_ctx->ctx_id == ce_ctx_id) {
 			found = true;
 			break;
@@ -706,8 +706,8 @@ void gk20a_ce_delete_context_priv(struct gk20a *g,
 	nvgpu_mutex_acquire(&ce_app->app_mutex);
-	list_for_each_entry_safe(ce_ctx, ce_ctx_save,
-			&ce_app->allocated_contexts, list) {
+	nvgpu_list_for_each_entry_safe(ce_ctx, ce_ctx_save,
+			&ce_app->allocated_contexts, gk20a_gpu_ctx, list) {
 		if (ce_ctx->ctx_id == ce_ctx_id) {
 			gk20a_ce_delete_gpu_context(ce_ctx);
 			--ce_app->ctx_count;


@@ -91,7 +91,7 @@ struct gk20a_ce_app {
 	struct nvgpu_mutex app_mutex;
 	int app_state;
-	struct list_head allocated_contexts;
+	struct nvgpu_list_node allocated_contexts;
 	u32 ctx_count;
 	u32 next_ctx_id;
 };
@@ -112,7 +112,7 @@ struct gk20a_gpu_ctx {
 	/* cmd buf mem_desc */
 	struct mem_desc cmd_buf_mem;
-	struct list_head list;
+	struct nvgpu_list_node list;

 	u64 submitted_seq_number;
 	u64 completed_seq_number;
@@ -121,6 +121,13 @@ struct gk20a_gpu_ctx {
 	u32 cmd_buf_end_queue_offset;
 };

+static inline struct gk20a_gpu_ctx *
+gk20a_gpu_ctx_from_list(struct nvgpu_list_node *node)
+{
+	return (struct gk20a_gpu_ctx *)
+		((uintptr_t)node - offsetof(struct gk20a_gpu_ctx, list));
+};
+
 /* global CE app related apis */
 int gk20a_init_ce_support(struct gk20a *g);
 void gk20a_ce_suspend(struct gk20a *g);
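Why the header gains a helper: the iterator calls above now pass gk20a_gpu_ctx and list by name, and the commit adds gk20a_gpu_ctx_from_list() alongside the type swap. That pairing suggests nvgpu's list macros paste together a per-type <type>_from_<member>() call to map a raw nvgpu_list_node back to its container. A hedged reconstruction of that expansion follows; the exact macro text in nvgpu's list header may differ:

/* hypothetical reconstruction of nvgpu's entry-resolution macro */
#define nvgpu_list_entry(ptr, type, member) \
	type ## _from_ ## member(ptr)

/*
 * Under this reading, nvgpu_list_entry(node, gk20a_gpu_ctx, list)
 * expands to gk20a_gpu_ctx_from_list(node), the helper added in
 * this commit, which performs the container_of()-style arithmetic.
 */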