gpu: nvgpu: drop user callback support in CE

Simplify the copy engine code by deleting support for the ce_event_callback
feature, which has never been used. Similarly, create the channel without a
finish callback to get rid of that Linux dependency, and delete the finish
callback function, as it no longer serves any purpose.

Also delete the submitted_seq_number and completed_seq_number fields, which
are only ever written, never read.
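
For reference, a minimal sketch of what a kernel-internal caller looks like
after this change (it mirrors the vidmem user updated below; the error
message string is illustrative):

	u32 ce_ctx_id;

	/*
	 * The callback parameter is gone; callers pass only the runlist id
	 * plus -1 to leave priority, timeslice and runlist interleave level
	 * at their defaults, as the vidmem user below does.
	 */
	ce_ctx_id = gk20a_ce_create_context(g,
			gk20a_fifo_get_fast_ce_runlist_id(g),
			-1,
			-1,
			-1);
	if (ce_ctx_id == (u32)~0)
		nvgpu_err(g, "ce: could not create context");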

Jira NVGPU-259

Change-Id: I02d15bdcb546f4dd8895a6bfb5130caf88a104e2
Signed-off-by: Konsta Holtta <kholtta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1589320
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-by: Alex Waterman <alexw@nvidia.com>
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Konsta Holtta authored 2017-10-31 13:07:08 +02:00, committed by mobile promotions
parent 295ade2f1e
commit 760f8dd7fb
4 changed files with 8 additions and 83 deletions


@@ -173,7 +173,6 @@ int gk20a_ce_execute_ops(struct gk20a *g,
 
 			/* Next available command buffer queue Index */
 			++ce_ctx->cmd_buf_read_queue_offset;
-			++ce_ctx->submitted_seq_number;
 		}
 	} else {
 		ret = -ENOMEM;


@@ -259,12 +259,11 @@ void nvgpu_init_mm_ce_context(struct gk20a *g)
 #if defined(CONFIG_GK20A_VIDMEM)
 	if (g->mm.vidmem.size && (g->mm.vidmem.ce_ctx_id == (u32)~0)) {
 		g->mm.vidmem.ce_ctx_id =
-			gk20a_ce_create_context_with_cb(g,
+			gk20a_ce_create_context(g,
 				gk20a_fifo_get_fast_ce_runlist_id(g),
 				-1,
 				-1,
-				-1,
-				NULL);
+				-1);
 
 		if (g->mm.vidmem.ce_ctx_id == (u32)~0)
 			nvgpu_err(g,


@@ -103,54 +103,6 @@ int gk20a_ce2_nonstall_isr(struct gk20a *g, u32 inst_id, u32 pri_base)
 }
 
 /* static CE app api */
-static void gk20a_ce_notify_all_user(struct gk20a *g, u32 event)
-{
-	struct gk20a_ce_app *ce_app = &g->ce_app;
-	struct gk20a_gpu_ctx *ce_ctx, *ce_ctx_save;
-
-	if (!ce_app->initialised)
-		return;
-
-	nvgpu_mutex_acquire(&ce_app->app_mutex);
-
-	nvgpu_list_for_each_entry_safe(ce_ctx, ce_ctx_save,
-			&ce_app->allocated_contexts, gk20a_gpu_ctx, list) {
-		if (ce_ctx->user_event_callback) {
-			ce_ctx->user_event_callback(ce_ctx->ctx_id,
-				event);
-		}
-	}
-
-	nvgpu_mutex_release(&ce_app->app_mutex);
-}
-
-static void gk20a_ce_finished_ctx_cb(struct channel_gk20a *ch, void *data)
-{
-	struct gk20a_gpu_ctx *ce_ctx = data;
-	bool channel_idle;
-	u32 event;
-
-	channel_gk20a_joblist_lock(ch);
-	channel_idle = channel_gk20a_joblist_is_empty(ch);
-	channel_gk20a_joblist_unlock(ch);
-
-	if (!channel_idle)
-		return;
-
-	gk20a_dbg(gpu_dbg_fn, "ce: finished %p", ce_ctx);
-
-	if (ch->has_timedout)
-		event = NVGPU_CE_CONTEXT_JOB_TIMEDOUT;
-	else
-		event = NVGPU_CE_CONTEXT_JOB_COMPLETED;
-
-	if (ce_ctx->user_event_callback)
-		ce_ctx->user_event_callback(ce_ctx->ctx_id,
-			event);
-
-	++ce_ctx->completed_seq_number;
-}
-
 static void gk20a_ce_free_command_buffer_stored_fence(struct gk20a_gpu_ctx *ce_ctx)
 {
 	u32 cmd_buf_index;
@@ -410,7 +362,6 @@ int gk20a_init_ce_support(struct gk20a *g)
 
 	if (ce_app->initialised) {
 		/* assume this happen during poweron/poweroff GPU sequence */
 		ce_app->app_state = NVGPU_CE_ACTIVE;
-		gk20a_ce_notify_all_user(g, NVGPU_CE_CONTEXT_RESUME);
 		return 0;
 	}
@@ -469,18 +420,16 @@ void gk20a_ce_suspend(struct gk20a *g)
 		return;
 
 	ce_app->app_state = NVGPU_CE_SUSPEND;
-	gk20a_ce_notify_all_user(g, NVGPU_CE_CONTEXT_SUSPEND);
 
 	return;
 }
 
 /* CE app utility functions */
-u32 gk20a_ce_create_context_with_cb(struct gk20a *g,
+u32 gk20a_ce_create_context(struct gk20a *g,
 		int runlist_id,
 		int priority,
 		int timeslice,
-		int runlist_level,
-		ce_event_callback user_event_callback)
+		int runlist_level)
 {
 	struct gk20a_gpu_ctx *ce_ctx;
 	struct gk20a_ce_app *ce_app = &g->ce_app;
@@ -501,15 +450,11 @@ u32 gk20a_ce_create_context_with_cb(struct gk20a *g,
 	}
 
 	ce_ctx->g = g;
-	ce_ctx->user_event_callback = user_event_callback;
 
 	ce_ctx->cmd_buf_read_queue_offset = 0;
 	ce_ctx->cmd_buf_end_queue_offset =
 		(NVGPU_CE_COMMAND_BUF_SIZE / NVGPU_CE_MAX_COMMAND_BUFF_SIZE_PER_KICKOFF);
 
-	ce_ctx->submitted_seq_number = 0;
-	ce_ctx->completed_seq_number = 0;
-
 	ce_ctx->vm = g->mm.ce.vm;
 
 	if (nvgpu_is_enabled(g, NVGPU_MM_CE_TSG_REQUIRED)) {
@@ -523,10 +468,7 @@ u32 gk20a_ce_create_context_with_cb(struct gk20a *g,
 	}
 
 	/* always kernel client needs privileged channel */
-	ce_ctx->ch = gk20a_open_new_channel_with_cb(g, gk20a_ce_finished_ctx_cb,
-			ce_ctx,
-			runlist_id,
-			true);
+	ce_ctx->ch = gk20a_open_new_channel(g, runlist_id, true);
 	if (!ce_ctx->ch) {
 		nvgpu_err(g, "ce: gk20a channel not available");
 		goto end;
@@ -613,7 +555,7 @@ end:
 
 	return ctx_id;
 }
-EXPORT_SYMBOL(gk20a_ce_create_context_with_cb);
+EXPORT_SYMBOL(gk20a_ce_create_context);
 
 void gk20a_ce_delete_context(struct gk20a *g,
 		u32 ce_ctx_id)


@@ -40,8 +40,6 @@ int gk20a_ce2_nonstall_isr(struct gk20a *g, u32 inst_id, u32 pri_base);
 #define NVGPU_CE_MAX_COMMAND_BUFF_SIZE_PER_KICKOFF	256
 #define NVGPU_CE_MAX_COMMAND_BUFF_SIZE_FOR_TRACING	8
 
-typedef void (*ce_event_callback)(u32 ce_ctx_id, u32 ce_event_flag);
-
 /* dma launch_flags */
 enum {
 	/* location */
@@ -69,14 +67,6 @@ enum {
 	NVGPU_CE_MEMSET = (1 << 1),
 };
 
-/* CE event flags */
-enum {
-	NVGPU_CE_CONTEXT_JOB_COMPLETED = (1 << 0),
-	NVGPU_CE_CONTEXT_JOB_TIMEDOUT = (1 << 1),
-	NVGPU_CE_CONTEXT_SUSPEND = (1 << 2),
-	NVGPU_CE_CONTEXT_RESUME = (1 << 3),
-};
-
 /* CE app state machine flags */
 enum {
 	NVGPU_CE_ACTIVE = (1 << 0),
@@ -106,7 +96,6 @@ struct gk20a_gpu_ctx {
 	u32 ctx_id;
 	struct nvgpu_mutex gpu_ctx_mutex;
 	int gpu_ctx_state;
-	ce_event_callback user_event_callback;
 
 	/* tsg related data */
 	struct tsg_gk20a *tsg;
@@ -120,9 +109,6 @@ struct gk20a_gpu_ctx {
 	struct nvgpu_list_node list;
 
-	u64 submitted_seq_number;
-	u64 completed_seq_number;
-
 	u32 cmd_buf_read_queue_offset;
 	u32 cmd_buf_end_queue_offset;
 };
 
@@ -140,12 +126,11 @@ void gk20a_ce_suspend(struct gk20a *g);
 void gk20a_ce_destroy(struct gk20a *g);
 
 /* CE app utility functions */
-u32 gk20a_ce_create_context_with_cb(struct gk20a *g,
+u32 gk20a_ce_create_context(struct gk20a *g,
		int runlist_id,
		int priority,
		int timeslice,
-		int runlist_level,
-		ce_event_callback user_event_callback);
+		int runlist_level);
 int gk20a_ce_execute_ops(struct gk20a *g,
		u32 ce_ctx_id,
		u64 src_buf,