Mirror of git://nv-tegra.nvidia.com/linux-nv-oot.git (synced 2025-12-22 17:25:35 +03:00)
dmaengine: tegra: Add GPCDMA driver as OOT

Add GPCDMA driver as OOT

Bug 3631204

Signed-off-by: Akhil R <akhilrajeev@nvidia.com>
Change-Id: I7282fece688542c3bcf03bf61f0c3dcefa94c554
Reviewed-on: https://git-master.nvidia.com/r/c/linux-nv-oot/+/2747499
Reviewed-by: Bitan Biswas <bbiswas@nvidia.com>
GVS: Gerrit_Virtual_Submit <buildbot_gerritrpt@nvidia.com>
Committed by: mobile promotions
Parent: e819643f9a
Commit: db30560ec0
Makefile (modified)
@@ -4,6 +4,7 @@
 LINUXINCLUDE += -I$(srctree.nvidia-oot)/include

 obj-m += devfreq/
+obj-m += dma/
 obj-m += gpu/
 obj-m += hwmon/
 obj-m += i2c/busses/
drivers/dma/Makefile (new file, 1 line)
@@ -0,0 +1 @@
obj-m += tegra186-gpc-dma.o
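With this one-line Makefile, the driver builds out-of-tree as tegra186-gpc-dma.ko. For context, a client driver would obtain a channel from the resulting DMA engine through the standard dmaengine API; the sketch below is illustrative only, and the "tx" channel name and example_request_dma() helper are assumptions, not part of this commit.

#include <linux/dmaengine.h>
#include <linux/err.h>

/* Hypothetical client helper: request the GPCDMA channel that the
 * client's device tree node maps under the name "tx" (assumed).
 */
static int example_request_dma(struct device *dev)
{
        struct dma_chan *chan;

        chan = dma_request_chan(dev, "tx");
        if (IS_ERR(chan))
                return PTR_ERR(chan);

        /* ... dmaengine_slave_config(), prep, submit, issue ... */

        dma_release_channel(chan);
        return 0;
}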
drivers/dma/dmaengine.h (new file, 201 lines)
@@ -0,0 +1,201 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * The contents of this file are private to DMA engine drivers, and is not
 * part of the API to be used by DMA engine users.
 */
#ifndef DMAENGINE_H
#define DMAENGINE_H

#include <linux/bug.h>
#include <linux/dmaengine.h>

/**
 * dma_cookie_init - initialize the cookies for a DMA channel
 * @chan: dma channel to initialize
 */
static inline void dma_cookie_init(struct dma_chan *chan)
{
        chan->cookie = DMA_MIN_COOKIE;
        chan->completed_cookie = DMA_MIN_COOKIE;
}

/**
 * dma_cookie_assign - assign a DMA engine cookie to the descriptor
 * @tx: descriptor needing cookie
 *
 * Assign a unique non-zero per-channel cookie to the descriptor.
 * Note: caller is expected to hold a lock to prevent concurrency.
 */
static inline dma_cookie_t dma_cookie_assign(struct dma_async_tx_descriptor *tx)
{
        struct dma_chan *chan = tx->chan;
        dma_cookie_t cookie;

        cookie = chan->cookie + 1;
        if (cookie < DMA_MIN_COOKIE)
                cookie = DMA_MIN_COOKIE;
        tx->cookie = chan->cookie = cookie;

        return cookie;
}

/**
 * dma_cookie_complete - complete a descriptor
 * @tx: descriptor to complete
 *
 * Mark this descriptor complete by updating the channels completed
 * cookie marker. Zero the descriptors cookie to prevent accidental
 * repeated completions.
 *
 * Note: caller is expected to hold a lock to prevent concurrency.
 */
static inline void dma_cookie_complete(struct dma_async_tx_descriptor *tx)
{
        BUG_ON(tx->cookie < DMA_MIN_COOKIE);
        tx->chan->completed_cookie = tx->cookie;
        tx->cookie = 0;
}

/**
 * dma_cookie_status - report cookie status
 * @chan: dma channel
 * @cookie: cookie we are interested in
 * @state: dma_tx_state structure to return last/used cookies
 *
 * Report the status of the cookie, filling in the state structure if
 * non-NULL. No locking is required.
 */
static inline enum dma_status dma_cookie_status(struct dma_chan *chan,
        dma_cookie_t cookie, struct dma_tx_state *state)
{
        dma_cookie_t used, complete;

        used = chan->cookie;
        complete = chan->completed_cookie;
        barrier();
        if (state) {
                state->last = complete;
                state->used = used;
                state->residue = 0;
                state->in_flight_bytes = 0;
        }
        return dma_async_is_complete(cookie, complete, used);
}

static inline void dma_set_residue(struct dma_tx_state *state, u32 residue)
{
        if (state)
                state->residue = residue;
}

static inline void dma_set_in_flight_bytes(struct dma_tx_state *state,
                                           u32 in_flight_bytes)
{
        if (state)
                state->in_flight_bytes = in_flight_bytes;
}
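Taken together, these helpers give a driver's .device_tx_status hook its usual shape. The sketch below is illustrative only: my_tx_status() and my_pending_bytes() are hypothetical names, and the residue is assumed to come from driver-specific hardware state.

/* Assumed driver-specific helper that reads the remaining byte count
 * from hardware; not part of this header.
 */
static u32 my_pending_bytes(struct dma_chan *chan, dma_cookie_t cookie);

/* Hypothetical status hook: defer to the cookie bookkeeping above and
 * only compute a residue for transactions that are still in flight.
 */
static enum dma_status my_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
                                    struct dma_tx_state *txstate)
{
        enum dma_status ret;

        ret = dma_cookie_status(chan, cookie, txstate);
        if (ret == DMA_COMPLETE || !txstate)
                return ret;

        /* Transaction still running: report the bytes left to transfer. */
        dma_set_residue(txstate, my_pending_bytes(chan, cookie));
        return ret;
}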
struct dmaengine_desc_callback {
        dma_async_tx_callback callback;
        dma_async_tx_callback_result callback_result;
        void *callback_param;
};

/**
 * dmaengine_desc_get_callback - get the passed in callback function
 * @tx: tx descriptor
 * @cb: temp struct to hold the callback info
 *
 * Fill the passed in cb struct with what's available in the passed in
 * tx descriptor struct
 * No locking is required.
 */
static inline void
dmaengine_desc_get_callback(struct dma_async_tx_descriptor *tx,
                            struct dmaengine_desc_callback *cb)
{
        cb->callback = tx->callback;
        cb->callback_result = tx->callback_result;
        cb->callback_param = tx->callback_param;
}

/**
 * dmaengine_desc_callback_invoke - call the callback function in cb struct
 * @cb: temp struct that is holding the callback info
 * @result: transaction result
 *
 * Call the callback function provided in the cb struct with the parameter
 * in the cb struct.
 * Locking is dependent on the driver.
 */
static inline void
dmaengine_desc_callback_invoke(struct dmaengine_desc_callback *cb,
                               const struct dmaengine_result *result)
{
        struct dmaengine_result dummy_result = {
                .result = DMA_TRANS_NOERROR,
                .residue = 0
        };

        if (cb->callback_result) {
                if (!result)
                        result = &dummy_result;
                cb->callback_result(cb->callback_param, result);
        } else if (cb->callback) {
                cb->callback(cb->callback_param);
        }
}

/**
 * dmaengine_desc_get_callback_invoke - get the callback in tx descriptor and
 *                                      then immediately call the callback.
 * @tx: dma async tx descriptor
 * @result: transaction result
 *
 * Call dmaengine_desc_get_callback() and dmaengine_desc_callback_invoke()
 * in a single function since no work is necessary in between for the driver.
 * Locking is dependent on the driver.
 */
static inline void
dmaengine_desc_get_callback_invoke(struct dma_async_tx_descriptor *tx,
                                   const struct dmaengine_result *result)
{
        struct dmaengine_desc_callback cb;

        dmaengine_desc_get_callback(tx, &cb);
        dmaengine_desc_callback_invoke(&cb, result);
}
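A driver's completion path typically retires the cookie and then reports the result to the client in one call. In the sketch below, struct my_desc, my_handle_completion(), and the bytes_left parameter are hypothetical; only the two helper calls come from this header.

struct my_desc {                        /* hypothetical driver descriptor */
        struct dma_async_tx_descriptor tx;
};

/* Hypothetical completion step: retire the cookie (the caller must
 * hold the channel lock for dma_cookie_complete()), then report the
 * result to the client in a single call.
 */
static void my_handle_completion(struct my_desc *d, u32 bytes_left)
{
        struct dmaengine_result res = {
                .result = DMA_TRANS_NOERROR,
                .residue = bytes_left,
        };

        dma_cookie_complete(&d->tx);
        dmaengine_desc_get_callback_invoke(&d->tx, &res);
}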
/**
 * dmaengine_desc_callback_valid - verify the callback is valid in cb
 * @cb: callback info struct
 *
 * Return a bool that verifies whether callback in cb is valid or not.
 * No locking is required.
 */
static inline bool
dmaengine_desc_callback_valid(struct dmaengine_desc_callback *cb)
{
        return cb->callback || cb->callback_result;
}

struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
struct dma_chan *dma_get_any_slave_channel(struct dma_device *device);

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>

static inline struct dentry *
dmaengine_get_debugfs_root(struct dma_device *dma_dev) {
        return dma_dev->dbg_dev_root;
}
#else
struct dentry;
static inline struct dentry *
dmaengine_get_debugfs_root(struct dma_device *dma_dev)
{
        return NULL;
}
#endif /* CONFIG_DEBUG_FS */

#endif
drivers/dma/tegra186-gpc-dma.c (new file, 1567 lines)
File diff suppressed because it is too large.
drivers/dma/virt-dma.h (new file, 227 lines)
@@ -0,0 +1,227 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Virtual DMA channel support for DMAengine
 *
 * Copyright (C) 2012 Russell King
 */
#ifndef VIRT_DMA_H
#define VIRT_DMA_H

#include <linux/dmaengine.h>
#include <linux/interrupt.h>

#include "dmaengine.h"

struct virt_dma_desc {
        struct dma_async_tx_descriptor tx;
        struct dmaengine_result tx_result;
        /* protected by vc.lock */
        struct list_head node;
};

struct virt_dma_chan {
        struct dma_chan chan;
        struct tasklet_struct task;
        void (*desc_free)(struct virt_dma_desc *);

        spinlock_t lock;

        /* protected by vc.lock */
        struct list_head desc_allocated;
        struct list_head desc_submitted;
        struct list_head desc_issued;
        struct list_head desc_completed;
        struct list_head desc_terminated;

        struct virt_dma_desc *cyclic;
};

static inline struct virt_dma_chan *to_virt_chan(struct dma_chan *chan)
{
        return container_of(chan, struct virt_dma_chan, chan);
}

void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head);
void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev);
struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *, dma_cookie_t);
extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *);
extern int vchan_tx_desc_free(struct dma_async_tx_descriptor *);

/**
 * vchan_tx_prep - prepare a descriptor
 * @vc: virtual channel allocating this descriptor
 * @vd: virtual descriptor to prepare
 * @tx_flags: flags argument passed in to prepare function
 */
static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan *vc,
        struct virt_dma_desc *vd, unsigned long tx_flags)
{
        unsigned long flags;

        dma_async_tx_descriptor_init(&vd->tx, &vc->chan);
        vd->tx.flags = tx_flags;
        vd->tx.tx_submit = vchan_tx_submit;
        vd->tx.desc_free = vchan_tx_desc_free;

        vd->tx_result.result = DMA_TRANS_NOERROR;
        vd->tx_result.residue = 0;

        spin_lock_irqsave(&vc->lock, flags);
        list_add_tail(&vd->node, &vc->desc_allocated);
        spin_unlock_irqrestore(&vc->lock, flags);

        return &vd->tx;
}
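A driver's prepare hook allocates its own descriptor type with an embedded virt_dma_desc and finishes through vchan_tx_prep(). The sketch below is illustrative: struct my_vdesc and my_prep_memcpy() are hypothetical, and the hardware programming is elided.

#include <linux/slab.h>

struct my_vdesc {                       /* hypothetical driver descriptor */
        struct virt_dma_desc vd;        /* embedded so the vchan lists can track it */
        /* ... hardware-specific fields (addresses, length, ...) ... */
};

static struct dma_async_tx_descriptor *
my_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
               size_t len, unsigned long flags)
{
        struct virt_dma_chan *vc = to_virt_chan(chan);
        struct my_vdesc *d = kzalloc(sizeof(*d), GFP_NOWAIT);

        if (!d)
                return NULL;

        /* ... record dst/src/len for later hardware programming ... */

        /* Puts the descriptor on desc_allocated and returns its tx. */
        return vchan_tx_prep(vc, &d->vd, flags);
}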
/**
 * vchan_issue_pending - move submitted descriptors to issued list
 * @vc: virtual channel to update
 *
 * vc.lock must be held by caller
 */
static inline bool vchan_issue_pending(struct virt_dma_chan *vc)
{
        list_splice_tail_init(&vc->desc_submitted, &vc->desc_issued);
        return !list_empty(&vc->desc_issued);
}

/**
 * vchan_cookie_complete - report completion of a descriptor
 * @vd: virtual descriptor to update
 *
 * vc.lock must be held by caller
 */
static inline void vchan_cookie_complete(struct virt_dma_desc *vd)
{
        struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
        dma_cookie_t cookie;

        cookie = vd->tx.cookie;
        dma_cookie_complete(&vd->tx);
        dev_vdbg(vc->chan.device->dev, "txd %p[%x]: marked complete\n",
                 vd, cookie);
        list_add_tail(&vd->node, &vc->desc_completed);

        tasklet_schedule(&vc->task);
}
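An interrupt handler typically completes the descriptor at the head of the issued list while holding vc.lock, as required above; the vchan tasklet then invokes the client callback from desc_completed. my_isr() below is a hypothetical sketch (vchan_next_desc() is defined later in this header).

/* Hypothetical IRQ handler: pop the finished descriptor off the
 * issued list and mark it complete under vc.lock.
 */
static irqreturn_t my_isr(int irq, void *data)
{
        struct virt_dma_chan *vc = data;
        struct virt_dma_desc *vd;

        spin_lock(&vc->lock);
        vd = vchan_next_desc(vc);
        if (vd) {
                list_del(&vd->node);
                vchan_cookie_complete(vd);
        }
        spin_unlock(&vc->lock);

        return IRQ_HANDLED;
}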
/**
 * vchan_vdesc_fini - Free or reuse a descriptor
 * @vd: virtual descriptor to free/reuse
 */
static inline void vchan_vdesc_fini(struct virt_dma_desc *vd)
{
        struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);

        if (dmaengine_desc_test_reuse(&vd->tx)) {
                unsigned long flags;

                spin_lock_irqsave(&vc->lock, flags);
                list_add(&vd->node, &vc->desc_allocated);
                spin_unlock_irqrestore(&vc->lock, flags);
        } else {
                vc->desc_free(vd);
        }
}

/**
 * vchan_cyclic_callback - report the completion of a period
 * @vd: virtual descriptor
 */
static inline void vchan_cyclic_callback(struct virt_dma_desc *vd)
{
        struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);

        vc->cyclic = vd;
        tasklet_schedule(&vc->task);
}

/**
 * vchan_terminate_vdesc - Disable pending cyclic callback
 * @vd: virtual descriptor to be terminated
 *
 * vc.lock must be held by caller
 */
static inline void vchan_terminate_vdesc(struct virt_dma_desc *vd)
{
        struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);

        list_add_tail(&vd->node, &vc->desc_terminated);

        if (vc->cyclic == vd)
                vc->cyclic = NULL;
}

/**
 * vchan_next_desc - peek at the next descriptor to be processed
 * @vc: virtual channel to obtain descriptor from
 *
 * vc.lock must be held by caller
 */
static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
{
        return list_first_entry_or_null(&vc->desc_issued,
                                        struct virt_dma_desc, node);
}

/**
 * vchan_get_all_descriptors - obtain all submitted and issued descriptors
 * @vc: virtual channel to get descriptors from
 * @head: list of descriptors found
 *
 * vc.lock must be held by caller
 *
 * Removes all submitted and issued descriptors from internal lists, and
 * provides a list of all descriptors found
 */
static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
                                             struct list_head *head)
{
        list_splice_tail_init(&vc->desc_allocated, head);
        list_splice_tail_init(&vc->desc_submitted, head);
        list_splice_tail_init(&vc->desc_issued, head);
        list_splice_tail_init(&vc->desc_completed, head);
        list_splice_tail_init(&vc->desc_terminated, head);
}
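This collector is the core of a driver's .device_terminate_all implementation; my_terminate_all() below is a hypothetical sketch that assumes the hardware channel is halted before the lists are drained.

/* Hypothetical terminate hook: stop the channel, collect every
 * descriptor the vchan lists still own, and free them after the
 * lock is dropped.
 */
static int my_terminate_all(struct dma_chan *chan)
{
        struct virt_dma_chan *vc = to_virt_chan(chan);
        unsigned long flags;
        LIST_HEAD(head);

        spin_lock_irqsave(&vc->lock, flags);
        /* ... halt the hardware channel here ... */
        vchan_get_all_descriptors(vc, &head);
        spin_unlock_irqrestore(&vc->lock, flags);

        vchan_dma_desc_free_list(vc, &head);

        return 0;
}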
static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
{
        struct virt_dma_desc *vd;
        unsigned long flags;
        LIST_HEAD(head);

        spin_lock_irqsave(&vc->lock, flags);
        vchan_get_all_descriptors(vc, &head);
        list_for_each_entry(vd, &head, node)
                dmaengine_desc_clear_reuse(&vd->tx);
        spin_unlock_irqrestore(&vc->lock, flags);

        vchan_dma_desc_free_list(vc, &head);
}

/**
 * vchan_synchronize() - synchronize callback execution to the current context
 * @vc: virtual channel to synchronize
 *
 * Makes sure that all scheduled or active callbacks have finished running. For
 * proper operation the caller has to ensure that no new callbacks are scheduled
 * after the invocation of this function started.
 * Free up the terminated cyclic descriptor to prevent memory leakage.
 */
static inline void vchan_synchronize(struct virt_dma_chan *vc)
{
        LIST_HEAD(head);
        unsigned long flags;

        tasklet_kill(&vc->task);

        spin_lock_irqsave(&vc->lock, flags);

        list_splice_tail_init(&vc->desc_terminated, &head);

        spin_unlock_irqrestore(&vc->lock, flags);

        vchan_dma_desc_free_list(vc, &head);
}

#endif