diff --git a/drivers/gpu/nvgpu/common/falcon/engine_dmem_queue.c b/drivers/gpu/nvgpu/common/falcon/engine_dmem_queue.c
index 6ffdeb633..1a4a3286d 100644
--- a/drivers/gpu/nvgpu/common/falcon/engine_dmem_queue.c
+++ b/drivers/gpu/nvgpu/common/falcon/engine_dmem_queue.c
@@ -25,7 +25,6 @@
 #include "engine_mem_queue_priv.h"
 #include "engine_dmem_queue.h"
-#include "falcon_priv.h"
 
 /* DMEM-Q specific ops */
 static int engine_dmem_queue_push(struct nvgpu_falcon *flcn,
diff --git a/drivers/gpu/nvgpu/common/falcon/engine_emem_queue.c b/drivers/gpu/nvgpu/common/falcon/engine_emem_queue.c
index 0b7fd0ec7..9746ad1ed 100644
--- a/drivers/gpu/nvgpu/common/falcon/engine_emem_queue.c
+++ b/drivers/gpu/nvgpu/common/falcon/engine_emem_queue.c
@@ -25,7 +25,6 @@
 #include "engine_mem_queue_priv.h"
 #include "engine_emem_queue.h"
-#include "falcon_priv.h"
 
 /* EMEM-Q specific ops */
 static int engine_emem_queue_push(struct nvgpu_falcon *flcn,
diff --git a/drivers/gpu/nvgpu/common/falcon/engine_fb_queue.c b/drivers/gpu/nvgpu/common/falcon/engine_fb_queue.c
index e723b506d..f89ed701c 100644
--- a/drivers/gpu/nvgpu/common/falcon/engine_fb_queue.c
+++ b/drivers/gpu/nvgpu/common/falcon/engine_fb_queue.c
@@ -31,25 +31,17 @@
 #include
 #include
 
-#include "falcon_priv.h"
 #include "engine_fb_queue_priv.h"
 
 /* FB-Q ops */
-static int engine_fb_queue_head(struct nvgpu_falcon *flcn,
-	struct nvgpu_engine_fb_queue *queue, u32 *head, bool set)
+static int engine_fb_queue_head(struct nvgpu_engine_fb_queue *queue,
+	u32 *head, bool set)
 {
-	int err = -EINVAL;
-
-	if (flcn->flcn_engine_dep_ops.queue_head != NULL) {
-		err = flcn->flcn_engine_dep_ops.queue_head(queue->g, queue->id,
-			queue->index, head, set);
-	}
-
-	return err;
+	return queue->queue_head(queue->g, queue->id, queue->index, head, set);
 }
 
-static int engine_fb_queue_tail(struct nvgpu_falcon *flcn,
-	struct nvgpu_engine_fb_queue *queue, u32 *tail, bool set)
+static int engine_fb_queue_tail(struct nvgpu_engine_fb_queue *queue,
+	u32 *tail, bool set)
 {
 	struct gk20a *g = queue->g;
 	int err = -EINVAL;
@@ -58,23 +50,19 @@ static int engine_fb_queue_tail(struct nvgpu_falcon *flcn,
 		*tail = queue->fbq.tail;
 		err = 0;
 	} else {
-		if (flcn->flcn_engine_dep_ops.queue_tail != NULL) {
-			err = flcn->flcn_engine_dep_ops.queue_tail(g,
-				queue->id, queue->index, tail, set);
-		}
+		err = queue->queue_tail(g, queue->id, queue->index, tail, set);
 	}
 
 	return err;
 }
 
-static inline u32 engine_fb_queue_get_next(struct nvgpu_falcon *flcn,
-	struct nvgpu_engine_fb_queue *queue, u32 head)
+static inline u32 engine_fb_queue_get_next(struct nvgpu_engine_fb_queue *queue,
+	u32 head)
 {
 	return (head + 1U) % queue->size;
 }
 
-static bool engine_fb_queue_has_room(struct nvgpu_falcon *flcn,
-	struct nvgpu_engine_fb_queue *queue,
+static bool engine_fb_queue_has_room(struct nvgpu_engine_fb_queue *queue,
 	u32 size)
 {
 	u32 head = 0;
@@ -82,27 +70,26 @@ static bool engine_fb_queue_has_room(struct nvgpu_falcon *flcn,
 	u32 next_head = 0;
 	int err = 0;
 
-	err = queue->head(flcn, queue, &head, QUEUE_GET);
+	err = queue->head(queue, &head, QUEUE_GET);
 	if (err != 0) {
 		nvgpu_err(queue->g, "queue head GET failed");
 		goto exit;
 	}
 
-	err = queue->tail(flcn, queue, &tail, QUEUE_GET);
+	err = queue->tail(queue, &tail, QUEUE_GET);
 	if (err != 0) {
 		nvgpu_err(queue->g, "queue tail GET failed");
 		goto exit;
 	}
 
-	next_head = engine_fb_queue_get_next(flcn, queue, head);
+	next_head = engine_fb_queue_get_next(queue, head);
 
 exit:
 	return next_head != tail;
 }
 
-static int engine_fb_queue_write(struct nvgpu_falcon *flcn,
-	struct nvgpu_engine_fb_queue *queue, u32 offset,
-	u8 *src, u32 size)
+static int engine_fb_queue_write(struct nvgpu_engine_fb_queue *queue,
+	u32 offset, u8 *src, u32 size)
 {
 	struct gk20a *g = queue->g;
 	struct nv_falcon_fbq_hdr *fb_q_hdr = (struct nv_falcon_fbq_hdr *)
@@ -137,7 +124,7 @@ exit:
 	return err;
 }
 
-static int engine_fb_queue_set_element_use_state(struct nvgpu_falcon *flcn,
+static int engine_fb_queue_set_element_use_state(
 	struct nvgpu_engine_fb_queue *queue, u32 queue_pos, bool set)
 {
 	int err = 0;
@@ -166,9 +153,9 @@ exit:
 	return err;
 }
 
-static int engine_fb_queue_is_element_in_use(struct nvgpu_falcon *flcn,
-	struct nvgpu_engine_fb_queue *queue,
-	u32 queue_pos, bool *in_use)
+static int engine_fb_queue_is_element_in_use(
+	struct nvgpu_engine_fb_queue *queue,
+	u32 queue_pos, bool *in_use)
 {
 	int err = 0;
 
@@ -183,8 +170,7 @@ exit:
 	return err;
 }
 
-static int engine_fb_queue_sweep(struct nvgpu_falcon *flcn,
-	struct nvgpu_engine_fb_queue *queue)
+static int engine_fb_queue_sweep(struct nvgpu_engine_fb_queue *queue)
 {
 	u32 head;
 	u32 tail;
@@ -192,7 +178,7 @@ static int engine_fb_queue_sweep(struct nvgpu_falcon *flcn,
 	int err = 0;
 
 	tail = queue->fbq.tail;
-	err = queue->head(flcn, queue, &head, QUEUE_GET);
+	err = queue->head(queue, &head, QUEUE_GET);
 	if (err != 0) {
 		nvgpu_err(queue->g, "flcn-%d queue-%d, position GET failed",
 			queue->flcn_id, queue->id);
@@ -205,7 +191,7 @@ static int engine_fb_queue_sweep(struct nvgpu_falcon *flcn,
 	 * can be made available.
 	 */
 	while (tail != head) {
-		if (engine_fb_queue_is_element_in_use(flcn, queue,
+		if (engine_fb_queue_is_element_in_use(queue,
 			tail, &in_use) != 0) {
 			break;
 		}
@@ -214,7 +200,7 @@ static int engine_fb_queue_sweep(struct nvgpu_falcon *flcn,
 			break;
 		}
 
-		tail = engine_fb_queue_get_next(flcn, queue, tail);
+		tail = engine_fb_queue_get_next(queue, tail);
 	}
 
 	/* Update tail */
@@ -262,12 +248,12 @@ u8 *nvgpu_engine_fb_queue_get_work_buffer(struct nvgpu_engine_fb_queue *queue)
 	return queue->fbq.work_buffer;
 }
 
-int nvgpu_engine_fb_queue_free_element(struct nvgpu_falcon *flcn,
-	struct nvgpu_engine_fb_queue *queue, u32 queue_pos)
+int nvgpu_engine_fb_queue_free_element(struct nvgpu_engine_fb_queue *queue,
+	u32 queue_pos)
 {
 	int err = 0;
 
-	err = engine_fb_queue_set_element_use_state(flcn, queue,
+	err = engine_fb_queue_set_element_use_state(queue,
 		queue_pos, false);
 	if (err != 0) {
 		nvgpu_err(queue->g, "fb queue elelment %d free failed",
@@ -275,35 +261,34 @@ int nvgpu_engine_fb_queue_free_element(struct nvgpu_falcon *flcn,
 		goto exit;
 	}
 
-	err = engine_fb_queue_sweep(flcn, queue);
+	err = engine_fb_queue_sweep(queue);
 
 exit:
 	return err;
 }
 
 /* queue is_empty check with lock */
-bool nvgpu_engine_fb_queue_is_empty(struct nvgpu_falcon *flcn,
-	struct nvgpu_engine_fb_queue *queue)
+bool nvgpu_engine_fb_queue_is_empty(struct nvgpu_engine_fb_queue *queue)
 {
 	u32 q_head = 0;
 	u32 q_tail = 0;
 	int err = 0;
 
-	if ((flcn == NULL) || (queue == NULL)) {
+	if (queue == NULL) {
 		return true;
 	}
 
 	/* acquire mutex */
 	nvgpu_mutex_acquire(&queue->mutex);
 
-	err = queue->head(flcn, queue, &q_head, QUEUE_GET);
+	err = queue->head(queue, &q_head, QUEUE_GET);
 	if (err != 0) {
 		nvgpu_err(queue->g, "flcn-%d queue-%d, head GET failed",
 			queue->flcn_id, queue->id);
 		goto exit;
 	}
 
-	err = queue->tail(flcn, queue, &q_tail, QUEUE_GET);
+	err = queue->tail(queue, &q_tail, QUEUE_GET);
 	if (err != 0) {
 		nvgpu_err(queue->g, "flcn-%d queue-%d, tail GET failed",
 			queue->flcn_id, queue->id);
@@ -317,20 +302,20 @@ exit:
 	return q_head == q_tail;
 }
 
-static int engine_fb_queue_prepare_write(struct nvgpu_falcon *flcn,
-	struct nvgpu_engine_fb_queue *queue, u32 size)
+static int engine_fb_queue_prepare_write(struct nvgpu_engine_fb_queue *queue,
+	u32 size)
 {
 	int err = 0;
 
 	/* make sure there's enough free space for the write */
-	if (!engine_fb_queue_has_room(flcn, queue, size)) {
+	if (!engine_fb_queue_has_room(queue, size)) {
 		nvgpu_pmu_dbg(queue->g, "queue full: queue-id %d: index %d",
 			queue->id, queue->index);
 		err = -EAGAIN;
 		goto exit;
 	}
 
-	err = queue->head(flcn, queue, &queue->position, QUEUE_GET);
+	err = queue->head(queue, &queue->position, QUEUE_GET);
 	if (err != 0) {
 		nvgpu_err(queue->g, "flcn-%d queue-%d, position GET failed",
 			queue->flcn_id, queue->id);
@@ -342,13 +327,13 @@ exit:
 }
 
 /* queue push operation with lock */
-int nvgpu_engine_fb_queue_push(struct nvgpu_falcon *flcn,
-	struct nvgpu_engine_fb_queue *queue, void *data, u32 size)
+int nvgpu_engine_fb_queue_push(struct nvgpu_engine_fb_queue *queue,
+	void *data, u32 size)
 {
 	struct gk20a *g;
 	int err = 0;
 
-	if ((flcn == NULL) || (queue == NULL)) {
+	if (queue == NULL) {
 		return -EINVAL;
 	}
 
@@ -366,7 +351,7 @@ int nvgpu_engine_fb_queue_push(struct nvgpu_falcon *flcn,
 	/* acquire mutex */
 	nvgpu_mutex_acquire(&queue->mutex);
 
-	err = engine_fb_queue_prepare_write(flcn, queue, size);
+	err = engine_fb_queue_prepare_write(queue, size);
 	if (err != 0) {
 		goto unlock_mutex;
 	}
@@ -378,7 +363,7 @@ int nvgpu_engine_fb_queue_push(struct nvgpu_falcon *flcn,
 	}
 
 	/* Set queue element in use */
-	if (engine_fb_queue_set_element_use_state(flcn, queue,
+	if (engine_fb_queue_set_element_use_state(queue,
 		queue->position, true) != 0) {
 		nvgpu_err(g,
 			"fb-queue element in use map is in invalid state");
@@ -387,16 +372,16 @@ int nvgpu_engine_fb_queue_push(struct nvgpu_falcon *flcn,
 	}
 
 	/* write data to FB */
-	err = engine_fb_queue_write(flcn, queue, queue->position, data, size);
+	err = engine_fb_queue_write(queue, queue->position, data, size);
 	if (err != 0) {
 		nvgpu_err(g, "write to fb-queue failed");
 		goto unlock_mutex;
 	}
 
-	queue->position = engine_fb_queue_get_next(flcn, queue,
+	queue->position = engine_fb_queue_get_next(queue,
 		queue->position);
 
-	err = queue->head(flcn, queue, &queue->position, QUEUE_SET);
+	err = queue->head(queue, &queue->position, QUEUE_SET);
 	if (err != 0) {
 		nvgpu_err(queue->g, "flcn-%d queue-%d, position SET failed",
 			queue->flcn_id, queue->id);
@@ -416,16 +401,15 @@ exit:
 }
 
 /* queue pop operation with lock */
-int nvgpu_engine_fb_queue_pop(struct nvgpu_falcon *flcn,
-	struct nvgpu_engine_fb_queue *queue, void *data, u32 size,
-	u32 *bytes_read)
+int nvgpu_engine_fb_queue_pop(struct nvgpu_engine_fb_queue *queue,
+	void *data, u32 size, u32 *bytes_read)
 {
 	struct gk20a *g;
 	struct pmu_hdr *hdr;
 	u32 entry_offset = 0U;
 	int err = 0;
 
-	if ((flcn == NULL) || (queue == NULL)) {
+	if (queue == NULL) {
 		return -EINVAL;
 	}
 
@@ -444,7 +428,7 @@ int nvgpu_engine_fb_queue_pop(struct nvgpu_falcon *flcn,
 	/* acquire mutex */
 	nvgpu_mutex_acquire(&queue->mutex);
 
-	err = queue->tail(flcn, queue, &queue->position, QUEUE_GET);
+	err = queue->tail(queue, &queue->position, QUEUE_GET);
 	if (err != 0) {
 		nvgpu_err(g, "flcn-%d queue-%d, position GET failed",
 			queue->flcn_id, queue->id);
@@ -495,13 +479,13 @@ int nvgpu_engine_fb_queue_pop(struct nvgpu_falcon *flcn,
 	if (queue->fbq.read_position >= hdr->size) {
 		queue->fbq.read_position = 0U;
 		/* Increment queue index. */
-		queue->position = engine_fb_queue_get_next(flcn, queue,
+		queue->position = engine_fb_queue_get_next(queue,
 			queue->position);
 	}
 
 	*bytes_read = size;
 
-	err = queue->tail(flcn, queue, &queue->position, QUEUE_SET);
+	err = queue->tail(queue, &queue->position, QUEUE_SET);
 	if (err != 0) {
 		nvgpu_err(g, "flcn-%d queue-%d, position SET failed",
 			queue->flcn_id, queue->id);
@@ -520,8 +504,7 @@ exit:
 	return err;
 }
 
-void nvgpu_engine_fb_queue_free(struct nvgpu_falcon *flcn,
-	struct nvgpu_engine_fb_queue **queue_p)
+void nvgpu_engine_fb_queue_free(struct nvgpu_engine_fb_queue **queue_p)
 {
 	struct nvgpu_engine_fb_queue *queue = NULL;
 	struct gk20a *g;
@@ -547,8 +530,7 @@ void nvgpu_engine_fb_queue_free(struct nvgpu_falcon *flcn,
 	*queue_p = NULL;
 }
 
-int nvgpu_engine_fb_queue_init(struct nvgpu_falcon *flcn,
-	struct nvgpu_engine_fb_queue **queue_p,
+int nvgpu_engine_fb_queue_init(struct nvgpu_engine_fb_queue **queue_p,
 	struct nvgpu_engine_fb_queue_params params)
 {
 	struct nvgpu_engine_fb_queue *queue = NULL;
@@ -582,6 +564,9 @@ int nvgpu_engine_fb_queue_init(struct nvgpu_falcon *flcn,
 
 	queue->position = 0U;
 
+	queue->queue_head = params.queue_head;
+	queue->queue_tail = params.queue_tail;
+
 	queue->head = engine_fb_queue_head;
 	queue->tail = engine_fb_queue_tail;
 
diff --git a/drivers/gpu/nvgpu/common/falcon/engine_fb_queue_priv.h b/drivers/gpu/nvgpu/common/falcon/engine_fb_queue_priv.h
index ba10c0dec..e62526971 100644
--- a/drivers/gpu/nvgpu/common/falcon/engine_fb_queue_priv.h
+++ b/drivers/gpu/nvgpu/common/falcon/engine_fb_queue_priv.h
@@ -92,11 +92,15 @@ struct nvgpu_engine_fb_queue {
 		u32 tail;
 	} fbq;
 
-	/* queue ops */
-	int (*tail)(struct nvgpu_falcon *flcn,
-		struct nvgpu_engine_fb_queue *queue, u32 *tail, bool set);
-	int (*head)(struct nvgpu_falcon *flcn,
-		struct nvgpu_engine_fb_queue *queue, u32 *head, bool set);
+	/* engine and queue specific ops */
+	int (*tail)(struct nvgpu_engine_fb_queue *queue, u32 *tail, bool set);
+	int (*head)(struct nvgpu_engine_fb_queue *queue, u32 *head, bool set);
+
+	/* engine specific ops */
+	int (*queue_head)(struct gk20a *g, u32 queue_id, u32 queue_index,
+		u32 *head, bool set);
+	int (*queue_tail)(struct gk20a *g, u32 queue_id, u32 queue_index,
+		u32 *tail, bool set);
 };
 
 #endif /* NVGPU_ENGINE_FB_QUEUE_PRIV_H */
diff --git a/drivers/gpu/nvgpu/common/falcon/engine_mem_queue.c b/drivers/gpu/nvgpu/common/falcon/engine_mem_queue.c
index d183977c7..32052932e 100644
--- a/drivers/gpu/nvgpu/common/falcon/engine_mem_queue.c
+++ b/drivers/gpu/nvgpu/common/falcon/engine_mem_queue.c
@@ -24,39 +24,12 @@
 #include
 
 #include "engine_mem_queue_priv.h"
-#include "falcon_priv.h"
 #include "engine_dmem_queue.h"
 #include "engine_emem_queue.h"
 
 /* common falcon queue ops */
-static int engine_mem_queue_head(struct nvgpu_falcon *flcn,
-	struct nvgpu_engine_mem_queue *queue, u32 *head, bool set)
-{
-	int err = -EINVAL;
-
-	if (flcn->flcn_engine_dep_ops.queue_head != NULL) {
-		err = flcn->flcn_engine_dep_ops.queue_head(flcn->g, queue->id,
-			queue->index, head, set);
-	}
-
-	return err;
-}
-
-static int engine_mem_queue_tail(struct nvgpu_falcon *flcn,
-	struct nvgpu_engine_mem_queue *queue, u32 *tail, bool set)
-{
-	int err = -EINVAL;
-
-	if (flcn->flcn_engine_dep_ops.queue_tail != NULL) {
-		err = flcn->flcn_engine_dep_ops.queue_tail(flcn->g, queue->id,
-			queue->index, tail, set);
-	}
-
-	return err;
-}
-
-static bool engine_mem_queue_has_room(struct nvgpu_falcon *flcn,
-	struct nvgpu_engine_mem_queue *queue, u32 size, bool *need_rewind)
+static bool engine_mem_queue_has_room(struct nvgpu_engine_mem_queue *queue,
+	u32 size, bool *need_rewind)
 {
 	u32 q_head = 0;
 	u32 q_tail = 0;
@@ -66,13 +39,15 @@ static bool engine_mem_queue_has_room(struct nvgpu_falcon *flcn,
 	size = ALIGN(size, QUEUE_ALIGNMENT);
 
-	err = queue->head(flcn, queue, &q_head, QUEUE_GET);
+	err = queue->head(queue->g, queue->id, queue->index,
+		&q_head, QUEUE_GET);
 	if (err != 0) {
 		nvgpu_err(queue->g, "queue head GET failed");
 		goto exit;
 	}
 
-	err = queue->tail(flcn, queue, &q_tail, QUEUE_GET);
+	err = queue->tail(queue->g, queue->id, queue->index,
+		&q_tail, QUEUE_GET);
 	if (err != 0) {
 		nvgpu_err(queue->g, "queue tail GET failed");
 		goto exit;
 	}
@@ -127,7 +102,7 @@ static int engine_mem_queue_rewind(struct nvgpu_falcon *flcn,
 	queue->position = queue->offset;
 
 	if (queue->oflag == OFLAG_READ) {
-		err = queue->tail(flcn, queue, &queue->position,
+		err = queue->tail(g, queue->id, queue->index, &queue->position,
 			QUEUE_SET);
 		if (err != 0) {
 			nvgpu_err(g, "flcn-%d queue-%d, position SET failed",
@@ -147,14 +122,15 @@ static int engine_mem_queue_prepare_write(struct nvgpu_falcon *flcn,
 	int err = 0;
 
 	/* make sure there's enough free space for the write */
-	if (!engine_mem_queue_has_room(flcn, queue, size, &q_rewind)) {
+	if (!engine_mem_queue_has_room(queue, size, &q_rewind)) {
 		nvgpu_pmu_dbg(queue->g, "queue full: queue-id %d: index %d",
 			queue->id, queue->index);
 		err = -EAGAIN;
 		goto exit;
 	}
 
-	err = queue->head(flcn, queue, &queue->position, QUEUE_GET);
+	err = queue->head(queue->g, queue->id, queue->index,
+		&queue->position, QUEUE_GET);
 	if (err != 0) {
 		nvgpu_err(queue->g, "flcn-%d queue-%d, position GET failed",
 			queue->flcn_id, queue->id);
@@ -208,7 +184,8 @@ int nvgpu_engine_mem_queue_push(struct nvgpu_falcon *flcn,
 
 	queue->position += ALIGN(size, QUEUE_ALIGNMENT);
 
-	err = queue->head(flcn, queue, &queue->position, QUEUE_SET);
+	err = queue->head(g, queue->id, queue->index,
+		&queue->position, QUEUE_SET);
 	if (err != 0) {
 		nvgpu_err(g, "flcn-%d queue-%d, position SET failed",
 			queue->flcn_id, queue->id);
@@ -250,14 +227,15 @@ int nvgpu_engine_mem_queue_pop(struct nvgpu_falcon *flcn,
 	/* acquire mutex */
 	nvgpu_mutex_acquire(&queue->mutex);
 
-	err = queue->head(flcn, queue, &q_head, QUEUE_GET);
+	err = queue->head(g, queue->id, queue->index, &q_head, QUEUE_GET);
 	if (err != 0) {
 		nvgpu_err(g, "flcn-%d, queue-%d, head GET failed",
 			queue->flcn_id, queue->id);
 		goto unlock_mutex;
 	}
 
-	err = queue->tail(flcn, queue, &queue->position, QUEUE_GET);
+	err = queue->tail(g, queue->id, queue->index,
+		&queue->position, QUEUE_GET);
 	if (err != 0) {
 		nvgpu_err(g, "flcn-%d queue-%d, position GET failed",
 			queue->flcn_id, queue->id);
@@ -288,7 +266,8 @@ int nvgpu_engine_mem_queue_pop(struct nvgpu_falcon *flcn,
 
 	queue->position += ALIGN(size, QUEUE_ALIGNMENT);
 
-	err = queue->tail(flcn, queue, &queue->position, QUEUE_SET);
+	err = queue->tail(g, queue->id, queue->index,
+		&queue->position, QUEUE_SET);
 	if (err != 0) {
 		nvgpu_err(g, "flcn-%d queue-%d, position SET failed",
 			queue->flcn_id, queue->id);
@@ -325,15 +304,14 @@ int nvgpu_engine_mem_queue_rewind(struct nvgpu_falcon *flcn,
 }
 
 /* queue is_empty check with lock */
-bool nvgpu_engine_mem_queue_is_empty(struct nvgpu_falcon *flcn,
-	struct nvgpu_engine_mem_queue *queue)
+bool nvgpu_engine_mem_queue_is_empty(struct nvgpu_engine_mem_queue *queue)
 {
 	struct gk20a *g;
 	u32 q_head = 0;
 	u32 q_tail = 0;
 	int err = 0;
 
-	if ((flcn == NULL) || (queue == NULL)) {
+	if (queue == NULL) {
 		return true;
 	}
 
@@ -342,14 +320,14 @@ bool nvgpu_engine_mem_queue_is_empty(struct nvgpu_falcon *flcn,
 	/* acquire mutex */
 	nvgpu_mutex_acquire(&queue->mutex);
 
-	err = queue->head(flcn, queue, &q_head, QUEUE_GET);
+	err = queue->head(g, queue->id, queue->index, &q_head, QUEUE_GET);
 	if (err != 0) {
 		nvgpu_err(g, "flcn-%d queue-%d, head GET failed",
 			queue->flcn_id, queue->id);
 		goto exit;
 	}
 
-	err = queue->tail(flcn, queue, &q_tail, QUEUE_GET);
+	err = queue->tail(g, queue->id, queue->index, &q_tail, QUEUE_GET);
 	if (err != 0) {
 		nvgpu_err(g, "flcn-%d queue-%d, tail GET failed",
 			queue->flcn_id, queue->id);
@@ -363,8 +341,7 @@ exit:
 	return q_head == q_tail;
 }
 
-void nvgpu_engine_mem_queue_free(struct nvgpu_falcon *flcn,
-	struct nvgpu_engine_mem_queue **queue_p)
+void nvgpu_engine_mem_queue_free(struct nvgpu_engine_mem_queue **queue_p)
 {
 	struct nvgpu_engine_mem_queue *queue = NULL;
 	struct gk20a *g;
@@ -392,8 +369,7 @@ u32 nvgpu_engine_mem_queue_get_size(struct nvgpu_engine_mem_queue *queue)
 	return queue->size;
 }
 
-int nvgpu_engine_mem_queue_init(struct nvgpu_falcon *flcn,
-	struct nvgpu_engine_mem_queue **queue_p,
+int nvgpu_engine_mem_queue_init(struct nvgpu_engine_mem_queue **queue_p,
 	struct nvgpu_engine_mem_queue_params params)
 {
 	struct nvgpu_engine_mem_queue *queue = NULL;
@@ -421,8 +397,8 @@ int nvgpu_engine_mem_queue_init(struct nvgpu_falcon *flcn,
 	queue->oflag = params.oflag;
 	queue->queue_type = params.queue_type;
 
-	queue->head = engine_mem_queue_head;
-	queue->tail = engine_mem_queue_tail;
+	queue->head = params.queue_head;
+	queue->tail = params.queue_tail;
 
 	nvgpu_log(g, gpu_dbg_pmu,
 		"flcn id-%d q-id %d: index %d, offset 0x%08x, size 0x%08x",
diff --git a/drivers/gpu/nvgpu/common/falcon/engine_mem_queue_priv.h b/drivers/gpu/nvgpu/common/falcon/engine_mem_queue_priv.h
index b307f1c94..735406b21 100644
--- a/drivers/gpu/nvgpu/common/falcon/engine_mem_queue_priv.h
+++ b/drivers/gpu/nvgpu/common/falcon/engine_mem_queue_priv.h
@@ -61,10 +61,10 @@ struct nvgpu_engine_mem_queue {
 		u32 src, void *data, u32 size);
 
 	/* engine specific ops */
-	int (*head)(struct nvgpu_falcon *flcn,
-		struct nvgpu_engine_mem_queue *queue, u32 *head, bool set);
-	int (*tail)(struct nvgpu_falcon *flcn,
-		struct nvgpu_engine_mem_queue *queue, u32 *tail, bool set);
+	int (*head)(struct gk20a *g, u32 queue_id, u32 queue_index,
+		u32 *head, bool set);
+	int (*tail)(struct gk20a *g, u32 queue_id, u32 queue_index,
+		u32 *tail, bool set);
 };
 
 #endif /* NVGPU_ENGINE_MEM_QUEUE_PRIV_H */
diff --git a/drivers/gpu/nvgpu/common/falcon/falcon_gk20a.c b/drivers/gpu/nvgpu/common/falcon/falcon_gk20a.c
index ae02ba74d..d5a16c33f 100644
--- a/drivers/gpu/nvgpu/common/falcon/falcon_gk20a.c
+++ b/drivers/gpu/nvgpu/common/falcon/falcon_gk20a.c
@@ -593,8 +593,6 @@ static void gk20a_falcon_engine_dependency_ops(struct nvgpu_falcon *flcn)
 	switch (flcn->flcn_id) {
 	case FALCON_ID_PMU:
 		flcn_eng_dep_ops->reset_eng = g->ops.pmu.pmu_reset;
-		flcn_eng_dep_ops->queue_head = g->ops.pmu.pmu_queue_head;
-		flcn_eng_dep_ops->queue_tail = g->ops.pmu.pmu_queue_tail;
 		break;
 	default:
 		/* NULL assignment make sure
diff --git a/drivers/gpu/nvgpu/common/falcon/falcon_gp106.c b/drivers/gpu/nvgpu/common/falcon/falcon_gp106.c
index 4366db2d4..25a425d31 100644
--- a/drivers/gpu/nvgpu/common/falcon/falcon_gp106.c
+++ b/drivers/gpu/nvgpu/common/falcon/falcon_gp106.c
@@ -34,8 +34,6 @@ static void gp106_falcon_engine_dependency_ops(struct nvgpu_falcon *flcn)
 	switch (flcn->flcn_id) {
 	case FALCON_ID_PMU:
 		flcn_eng_dep_ops->reset_eng = g->ops.pmu.pmu_reset;
-		flcn_eng_dep_ops->queue_head = g->ops.pmu.pmu_queue_head;
-		flcn_eng_dep_ops->queue_tail = g->ops.pmu.pmu_queue_tail;
 		break;
 	case FALCON_ID_SEC2:
 		flcn_eng_dep_ops->reset_eng = g->ops.sec2.sec2_reset;
diff --git a/drivers/gpu/nvgpu/common/falcon/falcon_priv.h b/drivers/gpu/nvgpu/common/falcon/falcon_priv.h
index ab3ef4c4f..7572d038f 100644
--- a/drivers/gpu/nvgpu/common/falcon/falcon_priv.h
+++ b/drivers/gpu/nvgpu/common/falcon/falcon_priv.h
@@ -73,10 +73,6 @@ enum falcon_mem_type {
 /* ops which are falcon engine specific */
 struct nvgpu_falcon_engine_dependency_ops {
 	int (*reset_eng)(struct gk20a *g);
-	int (*queue_head)(struct gk20a *g, u32 queue_id, u32 queue_index,
-		u32 *head, bool set);
-	int (*queue_tail)(struct gk20a *g, u32 queue_id, u32 queue_index,
-		u32 *tail, bool set);
 	int (*copy_from_emem)(struct nvgpu_falcon *flcn, u32 src, u8 *dst,
 		u32 size, u8 port);
 	int (*copy_to_emem)(struct nvgpu_falcon *flcn, u32 dst, u8 *src,
diff --git a/drivers/gpu/nvgpu/common/falcon/falcon_tu104.c b/drivers/gpu/nvgpu/common/falcon/falcon_tu104.c
index bcbf7537a..72011f248 100644
--- a/drivers/gpu/nvgpu/common/falcon/falcon_tu104.c
+++ b/drivers/gpu/nvgpu/common/falcon/falcon_tu104.c
@@ -38,8 +38,6 @@ static void tu104_falcon_engine_dependency_ops(struct nvgpu_falcon *flcn)
 		flcn_eng_dep_ops->copy_to_emem = g->ops.sec2.sec2_copy_to_emem;
 		flcn_eng_dep_ops->copy_from_emem =
			g->ops.sec2.sec2_copy_from_emem;
-		flcn_eng_dep_ops->queue_head = g->ops.sec2.sec2_queue_head;
-		flcn_eng_dep_ops->queue_tail = g->ops.sec2.sec2_queue_tail;
 		break;
 	default:
 		flcn_eng_dep_ops->reset_eng = NULL;
diff --git a/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c b/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c
index 84535a676..53de5574b 100644
--- a/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c
+++ b/drivers/gpu/nvgpu/common/pmu/pmu_ipc.c
@@ -165,6 +165,8 @@ int nvgpu_pmu_queue_init_fb(struct nvgpu_pmu *pmu,
 	params.flcn_id = FALCON_ID_PMU;
 	params.id = id;
 	params.oflag = oflag;
+	params.queue_head = g->ops.pmu.pmu_queue_head;
+	params.queue_tail = g->ops.pmu.pmu_queue_tail;
 
 	if (tmp_id == PMU_COMMAND_QUEUE_HPQ) {
 		tmp_id = PMU_QUEUE_HPQ_IDX_FOR_V3;
@@ -178,7 +180,7 @@ int nvgpu_pmu_queue_init_fb(struct nvgpu_pmu *pmu,
 	}
 	params.index = init->v5.queue_index[tmp_id];
 
-	err = nvgpu_engine_fb_queue_init(pmu->flcn, &pmu->fb_queue[id], params);
+	err = nvgpu_engine_fb_queue_init(&pmu->fb_queue[id], params);
 	if (err != 0) {
 		nvgpu_err(g, "queue-%d init failed", id);
 	}
@@ -221,12 +223,14 @@ int nvgpu_pmu_queue_init(struct nvgpu_pmu *pmu,
 	params.flcn_id = FALCON_ID_PMU;
 	params.id = id;
 	params.oflag = oflag;
+	params.queue_head = g->ops.pmu.pmu_queue_head;
+	params.queue_tail = g->ops.pmu.pmu_queue_tail;
 	params.queue_type = QUEUE_TYPE_DMEM;
 
 	g->ops.pmu_ver.get_pmu_init_msg_pmu_queue_params(id, init,
		&params.index, &params.offset, &params.size);
 
-	err = nvgpu_engine_mem_queue_init(pmu->flcn, &pmu->queue[id], params);
+	err = nvgpu_engine_mem_queue_init(&pmu->queue[id], params);
 	if (err != 0) {
 		nvgpu_err(g, "queue-%d init failed", id);
 	}
@@ -249,13 +253,13 @@ void nvgpu_pmu_queue_free(struct nvgpu_pmu *pmu, u32 id)
 			goto exit;
 		}
 
-		nvgpu_engine_fb_queue_free(pmu->flcn, &pmu->fb_queue[id]);
+		nvgpu_engine_fb_queue_free(&pmu->fb_queue[id]);
 	} else {
 		if (pmu->queue[id] == NULL) {
 			goto exit;
 		}
 
-		nvgpu_engine_mem_queue_free(pmu->flcn, &pmu->queue[id]);
+		nvgpu_engine_mem_queue_free(&pmu->queue[id]);
 	}
 
 exit:
@@ -369,7 +373,7 @@ static int pmu_write_cmd(struct nvgpu_pmu *pmu, struct pmu_cmd *cmd,
 	do {
 		if (pmu->queue_type == QUEUE_TYPE_FB) {
 			fb_queue = pmu->fb_queue[queue_id];
-			err = nvgpu_engine_fb_queue_push(pmu->flcn, fb_queue,
+			err = nvgpu_engine_fb_queue_push(fb_queue,
 				cmd, cmd->hdr.size);
 		} else {
 			queue = pmu->queue[queue_id];
@@ -892,7 +896,7 @@ static void pmu_payload_fbq_free(struct nvgpu_pmu *pmu,
 	 * set FBQ element work buffer to NULL
 	 * Clear the in use bit for the queue entry this CMD used.
 	 */
-	nvgpu_engine_fb_queue_free_element(pmu->flcn, seq->cmd_queue,
+	nvgpu_engine_fb_queue_free_element(seq->cmd_queue,
 		seq->fbq_element_index);
 }
 
@@ -1060,7 +1064,7 @@ static bool pmu_engine_mem_queue_read(struct nvgpu_pmu *pmu,
 
 	if (pmu->queue_type == QUEUE_TYPE_FB) {
 		fb_queue = pmu->fb_queue[queue_id];
-		err = nvgpu_engine_fb_queue_pop(pmu->flcn, fb_queue, data,
+		err = nvgpu_engine_fb_queue_pop(fb_queue, data,
 			bytes_to_read, &bytes_read);
 	} else {
 		queue = pmu->queue[queue_id];
@@ -1091,10 +1095,10 @@ bool nvgpu_pmu_queue_is_empty(struct nvgpu_pmu *pmu, u32 queue_id)
 
 	if (pmu->queue_type == QUEUE_TYPE_FB) {
 		fb_queue = pmu->fb_queue[queue_id];
-		empty = nvgpu_engine_fb_queue_is_empty(pmu->flcn, fb_queue);
+		empty = nvgpu_engine_fb_queue_is_empty(fb_queue);
 	} else {
 		queue = pmu->queue[queue_id];
-		empty = nvgpu_engine_mem_queue_is_empty(pmu->flcn, queue);
+		empty = nvgpu_engine_mem_queue_is_empty(queue);
 	}
 
 	return empty;
diff --git a/drivers/gpu/nvgpu/common/sec2/sec2.c b/drivers/gpu/nvgpu/common/sec2/sec2.c
index 1890ac32e..558bd38a1 100644
--- a/drivers/gpu/nvgpu/common/sec2/sec2.c
+++ b/drivers/gpu/nvgpu/common/sec2/sec2.c
@@ -68,10 +68,11 @@ int nvgpu_sec2_queue_init(struct nvgpu_sec2 *sec2, u32 id,
 	params.position = init->q_info[id].queue_offset;
 	params.size = init->q_info[id].queue_size;
 	params.oflag = oflag;
+	params.queue_head = g->ops.sec2.sec2_queue_head;
+	params.queue_tail = g->ops.sec2.sec2_queue_tail;
 	params.queue_type = QUEUE_TYPE_EMEM;
 
-	err = nvgpu_engine_mem_queue_init(sec2->flcn,
-		&sec2->queue[queue_log_id],
+	err = nvgpu_engine_mem_queue_init(&sec2->queue[queue_log_id],
 		params);
 	if (err != 0) {
 		nvgpu_err(g, "queue-%d init failed", queue_log_id);
@@ -94,7 +95,7 @@ void nvgpu_sec2_queue_free(struct nvgpu_sec2 *sec2, u32 id)
 		goto exit;
 	}
 
-	nvgpu_engine_mem_queue_free(sec2->flcn, &sec2->queue[id]);
+	nvgpu_engine_mem_queue_free(&sec2->queue[id]);
 exit:
 	return;
 }
diff --git a/drivers/gpu/nvgpu/common/sec2/sec2_ipc.c b/drivers/gpu/nvgpu/common/sec2/sec2_ipc.c
index adeef8d8b..2dee4f56d 100644
--- a/drivers/gpu/nvgpu/common/sec2/sec2_ipc.c
+++ b/drivers/gpu/nvgpu/common/sec2/sec2_ipc.c
@@ -276,7 +276,7 @@ static bool sec2_read_message(struct nvgpu_sec2 *sec2,
 
 	*status = 0U;
 
-	if (nvgpu_engine_mem_queue_is_empty(sec2->flcn, queue)) {
+	if (nvgpu_engine_mem_queue_is_empty(queue)) {
 		return false;
 	}
 
diff --git a/drivers/gpu/nvgpu/include/nvgpu/engine_fb_queue.h b/drivers/gpu/nvgpu/include/nvgpu/engine_fb_queue.h
index b9d13c93b..0d67f663a 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/engine_fb_queue.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/engine_fb_queue.h
@@ -26,7 +26,6 @@
 #include
 
 struct gk20a;
-struct nvgpu_falcon;
 struct nvgpu_engine_fb_queue;
 
 struct nvgpu_engine_fb_queue_params {
@@ -51,27 +50,29 @@ struct nvgpu_engine_fb_queue_params {
 
 	/* Holds super surface base address */
 	struct nvgpu_mem *super_surface_mem;
+
+	/* engine specific ops */
+	int (*queue_head)(struct gk20a *g, u32 queue_id, u32 queue_index,
+		u32 *head, bool set);
+	int (*queue_tail)(struct gk20a *g, u32 queue_id, u32 queue_index,
+		u32 *tail, bool set);
 };
 
 /* queue public functions */
-int nvgpu_engine_fb_queue_init(struct nvgpu_falcon *flcn,
-	struct nvgpu_engine_fb_queue **queue_p,
+int nvgpu_engine_fb_queue_init(struct nvgpu_engine_fb_queue **queue_p,
 	struct nvgpu_engine_fb_queue_params params);
-bool nvgpu_engine_fb_queue_is_empty(struct nvgpu_falcon *flcn,
-	struct nvgpu_engine_fb_queue *queue);
-int nvgpu_engine_fb_queue_pop(struct nvgpu_falcon *flcn,
-	struct nvgpu_engine_fb_queue *queue, void *data, u32 size,
-	u32 *bytes_read);
-int nvgpu_engine_fb_queue_push(struct nvgpu_falcon *flcn,
-	struct nvgpu_engine_fb_queue *queue, void *data, u32 size);
-void nvgpu_engine_fb_queue_free(struct nvgpu_falcon *flcn,
-	struct nvgpu_engine_fb_queue **queue_p);
+bool nvgpu_engine_fb_queue_is_empty(struct nvgpu_engine_fb_queue *queue);
+int nvgpu_engine_fb_queue_pop(struct nvgpu_engine_fb_queue *queue,
+	void *data, u32 size, u32 *bytes_read);
+int nvgpu_engine_fb_queue_push(struct nvgpu_engine_fb_queue *queue,
+	void *data, u32 size);
+void nvgpu_engine_fb_queue_free(struct nvgpu_engine_fb_queue **queue_p);
 u32 nvgpu_engine_fb_queue_get_position(struct nvgpu_engine_fb_queue *queue);
 u32 nvgpu_engine_fb_queue_get_element_size(struct nvgpu_engine_fb_queue *queue);
 u32 nvgpu_engine_fb_queue_get_offset(struct nvgpu_engine_fb_queue *queue);
 u8 *nvgpu_engine_fb_queue_get_work_buffer(struct nvgpu_engine_fb_queue *queue);
-int nvgpu_engine_fb_queue_free_element(struct nvgpu_falcon *flcn,
-	struct nvgpu_engine_fb_queue *queue, u32 queue_pos);
+int nvgpu_engine_fb_queue_free_element(struct nvgpu_engine_fb_queue *queue,
+	u32 queue_pos);
 void nvgpu_engine_fb_queue_lock_work_buffer(
 	struct nvgpu_engine_fb_queue *queue);
 void nvgpu_engine_fb_queue_unlock_work_buffer(
diff --git a/drivers/gpu/nvgpu/include/nvgpu/engine_mem_queue.h b/drivers/gpu/nvgpu/include/nvgpu/engine_mem_queue.h
index d32c6936c..402183842 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/engine_mem_queue.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/engine_mem_queue.h
@@ -51,14 +51,18 @@ struct nvgpu_engine_mem_queue_params {
 	u32 size;
 	/* open-flag */
 	u32 oflag;
+
+	/* engine specific ops */
+	int (*queue_head)(struct gk20a *g, u32 queue_id, u32 queue_index,
+		u32 *head, bool set);
+	int (*queue_tail)(struct gk20a *g, u32 queue_id, u32 queue_index,
+		u32 *tail, bool set);
 };
 
 /* queue public functions */
-int nvgpu_engine_mem_queue_init(struct nvgpu_falcon *flcn,
-	struct nvgpu_engine_mem_queue **queue_p,
+int nvgpu_engine_mem_queue_init(struct nvgpu_engine_mem_queue **queue_p,
 	struct nvgpu_engine_mem_queue_params params);
-bool nvgpu_engine_mem_queue_is_empty(struct nvgpu_falcon *flcn,
-	struct nvgpu_engine_mem_queue *queue);
+bool nvgpu_engine_mem_queue_is_empty(struct nvgpu_engine_mem_queue *queue);
 int nvgpu_engine_mem_queue_rewind(struct nvgpu_falcon *flcn,
 	struct nvgpu_engine_mem_queue *queue);
 int nvgpu_engine_mem_queue_pop(struct nvgpu_falcon *flcn,
@@ -66,8 +70,7 @@ int nvgpu_engine_mem_queue_pop(struct nvgpu_falcon *flcn,
 	u32 *bytes_read);
 int nvgpu_engine_mem_queue_push(struct nvgpu_falcon *flcn,
 	struct nvgpu_engine_mem_queue *queue, void *data, u32 size);
-void nvgpu_engine_mem_queue_free(struct nvgpu_falcon *flcn,
-	struct nvgpu_engine_mem_queue **queue_p);
+void nvgpu_engine_mem_queue_free(struct nvgpu_engine_mem_queue **queue_p);
 u32 nvgpu_engine_mem_queue_get_size(struct nvgpu_engine_mem_queue *queue);
 
 #endif /* NVGPU_ENGINE_MEM_QUEUE_H */
diff --git a/drivers/gpu/nvgpu/tu104/sec2_tu104.c b/drivers/gpu/nvgpu/tu104/sec2_tu104.c
index f9e895688..500d666ef 100644
--- a/drivers/gpu/nvgpu/tu104/sec2_tu104.c
+++ b/drivers/gpu/nvgpu/tu104/sec2_tu104.c
@@ -419,7 +419,7 @@ void tu104_sec2_isr(struct gk20a *g)
 
 	if (recheck) {
 		queue = sec2->queue[SEC2_NV_MSGQ_LOG_ID];
-		if (!nvgpu_engine_mem_queue_is_empty(sec2->flcn, queue)) {
+		if (!nvgpu_engine_mem_queue_is_empty(queue)) {
 			gk20a_writel(g, psec_falcon_irqsset_r(),
 				psec_falcon_irqsset_swgen0_set_f());
 		}