gpu: nvgpu: Update runlist_update() to take runlist ptr
Update the nvgpu_runlist_update_for_channel() function:
- Rename it to nvgpu_runlist_update()
- Have it take a pointer to the runlist to update instead
of a runlist ID. For the most part this makes the code
better, but there are a few places where it is worse (for
now).
This starts the slow and painful process of moving non-runlist code
away from using runlist IDs in the many places where it should not.

Most of this patch is just fixing compilation problems caused by the
minor header updates.
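
As a rough illustration of the API change, a minimal before/after sketch of
a call site (the caller and argument values are hypothetical; the real call
sites are updated in the patch body, and the channel is assumed to carry the
runlist pointer added below):

        /* Before: the runlist was named by ID. */
        err = nvgpu_runlist_update_for_channel(g, ch->runlist_id, ch,
                        true, true);

        /* After: the runlist is passed directly as a pointer. */
        err = nvgpu_runlist_update(g, ch->runlist, ch, true, true);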
JIRA NVGPU-6425
Change-Id: Id9885fe655d1d750625a1c8aceda9e67a2cbdb7a
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvgpu/+/2470304
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-by: svc-mobile-cert <svc-mobile-cert@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
GVS: Gerrit_Virtual_Submit
committed by mobile promotions
parent fae1f20ab7
commit 77c0b9ffdc
@@ -50,6 +50,7 @@ struct priv_cmd_queue;
 struct priv_cmd_entry;
 struct nvgpu_channel_wdt;
 struct nvgpu_user_fence;
+struct nvgpu_runlist;
 
 /**
  * S/W defined invalid channel identifier.
@@ -488,8 +489,8 @@ struct nvgpu_channel {
          */
         u32 runqueue_sel;
 
-        /** Identifer of the runlist the channel will run on */
-        u32 runlist_id;
+        /** Runlist the channel will run on. */
+        struct nvgpu_runlist *runlist;
 
         /**
          * Recovery path can be entered twice for the same error in
@@ -65,12 +65,12 @@ struct gops_runlist {
          * @retval -E2BIG in case there are not enough entries in the runlist
          * buffer to accommodate all active channels/TSGs.
          */
-        int (*reload)(struct gk20a *g, u32 runlist_id,
+        int (*reload)(struct gk20a *g, struct nvgpu_runlist *rl,
                         bool add, bool wait_for_finish);
 
         /** @cond DOXYGEN_SHOULD_SKIP_THIS */
 
-        int (*update_for_channel)(struct gk20a *g, u32 runlist_id,
+        int (*update)(struct gk20a *g, struct nvgpu_runlist *rl,
                         struct nvgpu_channel *ch, bool add,
                         bool wait_for_finish);
         u32 (*count_max)(struct gk20a *g);
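
A minimal sketch of how common code might now dispatch through the renamed
HAL op, assuming (as is the usual nvgpu pattern) that struct gops_runlist is
reachable as g->ops.runlist; the wrapper function itself is made up for
illustration:

        static int runlist_update_via_hal(struct gk20a *g,
                        struct nvgpu_runlist *rl, struct nvgpu_channel *ch,
                        bool add, bool wait_for_finish)
        {
                /* Dispatch on the runlist pointer instead of a runlist ID. */
                return g->ops.runlist.update(g, rl, ch, add, wait_for_finish);
        }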
@@ -142,7 +142,7 @@ u32 nvgpu_runlist_construct_locked(struct nvgpu_fifo *f,
  *
  * When #ch is NULL, this function has same behavior as #nvgpu_runlist_reload.
  * When #ch is non NULL, this function has same behavior as
- * #nvgpu_runlist_update_for_channel.
+ * #nvgpu_runlist_update.
  *
  * The only difference with #nvgpu_runlist_reload is that the caller already
  * holds the runlist_lock before calling this function.
@@ -185,7 +185,7 @@ int nvgpu_runlist_reschedule(struct nvgpu_channel *ch, bool preempt_next,
  * @retval -E2BIG in case there are not enough entries in runlist buffer to
  * accommodate all active channels/TSGs.
  */
-int nvgpu_runlist_update_for_channel(struct gk20a *g, u32 runlist_id,
+int nvgpu_runlist_update(struct gk20a *g, struct nvgpu_runlist *rl,
                 struct nvgpu_channel *ch, bool add, bool wait_for_finish);
 
 /**
@@ -211,7 +211,7 @@ int nvgpu_runlist_update_for_channel(struct gk20a *g, u32 runlist_id,
  * @retval -E2BIG in case there are not enough entries in the runlist buffer
  * to accommodate all active channels/TSGs.
  */
-int nvgpu_runlist_reload(struct gk20a *g, u32 runlist_id,
+int nvgpu_runlist_reload(struct gk20a *g, struct nvgpu_runlist *rl,
                 bool add, bool wait_for_finish);
 
 /**
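
A hedged usage sketch for the pointer-based reload, based only on the
signature and the -E2BIG behavior documented above; the error handling is
invented for illustration (nvgpu_err() is the driver's usual error-logging
helper):

        /* Rebuild the runlist with all active channels/TSGs and wait for it. */
        err = nvgpu_runlist_reload(g, rl, true, true);
        if (err == -E2BIG) {
                /* Not enough runlist buffer entries for all active channels/TSGs. */
                nvgpu_err(g, "runlist buffer too small");
        }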