Commit 4879b7ae authored by Linus Torvalds

Merge tag 'dmaengine-4.12-rc1' of git://git.infradead.org/users/vkoul/slave-dma

Pull dmaengine updates from Vinod Koul:
 "This time again a smaller update consisting of:

   - support for TI DA8xx dma controller and updates to the cppi driver

   - updates to a bunch of drivers like xilinx, pl08x, stm32-dma, mv_xor,
     ioat, dmatest"

* tag 'dmaengine-4.12-rc1' of git://git.infradead.org/users/vkoul/slave-dma: (35 commits)
  dmaengine: pl08x: remove lock documentation
  dmaengine: pl08x: fix pl08x_dma_chan_state documentation
  dmaengine: pl08x: Use the BIT() macro consistently
  dmaengine: pl080: Fix some missing kerneldoc
  dmaengine: pl080: Cut some unused defines
  dmaengine: dmatest: Add check for supported buffer count (sg_buffers)
  dmaengine: dmatest: Select DMA_ENGINE_RAID as its needed for the slave_sg test
  dmaengine: virt-dma: Convert to use list_for_each_entry_safe()
  dma-debug: use offset_in_page() macro
  dmaengine: mv_xor: use offset_in_page() macro
  dmaengine: dmatest: use offset_in_pa...
parents ecc721a7 be13ec66
......@@ -18,10 +18,26 @@ Required properties:
- phy-names: Should be "usb-phy"
- dmas: specifies the dma channels
- dma-names: specifies the names of the channels. Use "rxN" for receive
and "txN" for transmit endpoints. N specifies the endpoint number.
Optional properties:
~~~~~~~~~~~~~~~~~~~~
- vbus-supply: Phandle to a regulator providing the USB bus power.
DMA
~~~
- compatible: ti,da830-cppi41
- reg: offset and length of the following register spaces: CPPI DMA Controller,
CPPI DMA Scheduler, Queue Manager
- reg-names: "controller", "scheduler", "queuemgr"
- #dma-cells: should be set to 2. The first number represents the
channel number (0 … 3 for endpoints 1 … 4).
The second number is 0 for RX and 1 for TX transfers.
- #dma-channels: should be set to 4 representing the 4 endpoints.
Example:
usb_phy: usb-phy {
compatible = "ti,da830-usb-phy";
......@@ -30,7 +46,10 @@ Example:
};
usb0: usb@200000 {
compatible = "ti,da830-musb";
reg = <0x00200000 0x10000>;
reg = <0x00200000 0x1000>;
ranges;
#address-cells = <1>;
#size-cells = <1>;
interrupts = <58>;
interrupt-names = "mc";
......@@ -39,5 +58,25 @@ Example:
phys = <&usb_phy 0>;
phy-names = "usb-phy";
dmas = <&cppi41dma 0 0 &cppi41dma 1 0
&cppi41dma 2 0 &cppi41dma 3 0
&cppi41dma 0 1 &cppi41dma 1 1
&cppi41dma 2 1 &cppi41dma 3 1>;
dma-names =
"rx1", "rx2", "rx3", "rx4",
"tx1", "tx2", "tx3", "tx4";
status = "okay";
cppi41dma: dma-controller@201000 {
compatible = "ti,da830-cppi41";
reg = <0x201000 0x1000
0x202000 0x1000
0x204000 0x4000>;
reg-names = "controller", "scheduler", "queuemgr";
interrupts = <58>;
#dma-cells = <2>;
#dma-channels = <4>;
};
};
......@@ -10,7 +10,6 @@
* published by the Free Software Foundation.
*/
#include <linux/amba/pl330.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/interrupt.h>
......
......@@ -514,12 +514,12 @@ config TIMB_DMA
Enable support for the Timberdale FPGA DMA engine.
config TI_CPPI41
tristate "AM33xx CPPI41 DMA support"
depends on ARCH_OMAP
tristate "CPPI 4.1 DMA support"
depends on (ARCH_OMAP || ARCH_DAVINCI_DA8XX)
select DMA_ENGINE
help
The Communications Port Programming Interface (CPPI) 4.1 DMA engine
is currently used by the USB driver on AM335x platforms.
is currently used by the USB driver on AM335x and DA8xx platforms.
config TI_DMA_CROSSBAR
bool
......@@ -608,6 +608,7 @@ config ASYNC_TX_DMA
config DMATEST
tristate "DMA Test client"
depends on DMA_ENGINE
select DMA_ENGINE_RAID
help
Simple DMA test client. Say N unless you're debugging a
DMA Device driver.
......
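The DMATEST change above selects DMA_ENGINE_RAID because the dmaengine core only creates its larger unmap-data pools when that option is enabled; the slave_sg test maps many buffers per transfer and would exhaust the minimal pool otherwise. An abridged sketch of the pool table in drivers/dma/dmaengine.c follows -- treat the exact pool sizes as an assumption:

#define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }

static struct dmaengine_unmap_pool unmap_pool[] = {
	__UNMAP_POOL(2),
#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
	__UNMAP_POOL(16),
	__UNMAP_POOL(128),
	__UNMAP_POOL(256),	/* only the 2-entry pool exists without DMA_ENGINE_RAID */
#endif
};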
......@@ -106,6 +106,7 @@ struct pl08x_driver_data;
/**
* struct vendor_data - vendor-specific config parameters for PL08x derivatives
* @config_offset: offset to the configuration register
* @channels: the number of channels available in this variant
* @signals: the number of request signals available from the hardware
* @dualmaster: whether this version supports dual AHB masters or not.
......@@ -145,6 +146,8 @@ struct pl08x_bus_data {
/**
* struct pl08x_phy_chan - holder for the physical channels
* @id: physical index to this channel
* @base: memory base address for this physical channel
* @reg_config: configuration address for this physical channel
* @lock: a lock to use when altering an instance of this struct
* @serving: the virtual channel currently being served by this physical
* channel
......@@ -203,7 +206,7 @@ struct pl08x_txd {
};
/**
* struct pl08x_dma_chan_state - holds the PL08x specific virtual channel
* enum pl08x_dma_chan_state - holds the PL08x specific virtual channel
* states
* @PL08X_CHAN_IDLE: the channel is idle
* @PL08X_CHAN_RUNNING: the channel has allocated a physical transport
......@@ -226,9 +229,8 @@ enum pl08x_dma_chan_state {
* @phychan: the physical channel utilized by this channel, if there is one
* @name: name of channel
* @cd: channel platform data
* @runtime_addr: address for RX/TX according to the runtime config
* @cfg: slave configuration
* @at: active transaction on this channel
* @lock: a lock for this channel data
* @host: a pointer to the host (internal use)
* @state: whether the channel is idle, paused, running etc
* @slave: whether this channel is a device (slave) or for memcpy
......@@ -262,7 +264,7 @@ struct pl08x_dma_chan {
* @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI
* fetches
* @mem_buses: set to indicate memory transfers on AHB2.
* @lock: a spinlock for this struct
* @lli_words: how many words are used in each LLI item for this variant
*/
struct pl08x_driver_data {
struct dma_device slave;
......@@ -417,7 +419,7 @@ static void pl08x_start_next_txd(struct pl08x_dma_chan *plchan)
/* Enable the DMA channel */
/* Do not access config register until channel shows as disabled */
while (readl(pl08x->base + PL080_EN_CHAN) & (1 << phychan->id))
while (readl(pl08x->base + PL080_EN_CHAN) & BIT(phychan->id))
cpu_relax();
/* Do not access config register until channel shows as inactive */
......@@ -484,8 +486,8 @@ static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x,
writel(val, ch->reg_config);
writel(1 << ch->id, pl08x->base + PL080_ERR_CLEAR);
writel(1 << ch->id, pl08x->base + PL080_TC_CLEAR);
writel(BIT(ch->id), pl08x->base + PL080_ERR_CLEAR);
writel(BIT(ch->id), pl08x->base + PL080_TC_CLEAR);
}
static inline u32 get_bytes_in_cctl(u32 cctl)
......@@ -1834,7 +1836,7 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
return IRQ_NONE;
for (i = 0; i < pl08x->vd->channels; i++) {
if (((1 << i) & err) || ((1 << i) & tc)) {
if ((BIT(i) & err) || (BIT(i) & tc)) {
/* Locate physical channel */
struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i];
struct pl08x_dma_chan *plchan = phychan->serving;
......@@ -1872,7 +1874,7 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
}
spin_unlock(&plchan->vc.lock);
mask |= (1 << i);
mask |= BIT(i);
}
}
......
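For context on the pl08x conversions above: BIT() expands to an unsigned long shift, so BIT(ch->id) selects the same bit as (1 << ch->id) while avoiding a signed-int shift if an index ever reaches bit 31. Its definition (include/linux/bitops.h at the time):

#define BIT(nr)	(1UL << (nr))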
......@@ -68,7 +68,6 @@
#define QMGR_MEMCTRL_IDX_SH 16
#define QMGR_MEMCTRL_DESC_SH 8
#define QMGR_NUM_PEND 5
#define QMGR_PEND(x) (0x90 + (x) * 4)
#define QMGR_PENDING_SLOT_Q(x) (x / 32)
......@@ -131,7 +130,6 @@ struct cppi41_dd {
u32 first_td_desc;
struct cppi41_channel *chan_busy[ALLOC_DECS_NUM];
void __iomem *usbss_mem;
void __iomem *ctrl_mem;
void __iomem *sched_mem;
void __iomem *qmgr_mem;
......@@ -139,6 +137,10 @@ struct cppi41_dd {
const struct chan_queues *queues_rx;
const struct chan_queues *queues_tx;
struct chan_queues td_queue;
u16 first_completion_queue;
u16 qmgr_num_pend;
u32 n_chans;
u8 platform;
struct list_head pending; /* Pending queued transfers */
spinlock_t lock; /* Lock for pending list */
......@@ -149,8 +151,7 @@ struct cppi41_dd {
bool is_suspended;
};
#define FIST_COMPLETION_QUEUE 93
static struct chan_queues usb_queues_tx[] = {
static struct chan_queues am335x_usb_queues_tx[] = {
/* USB0 ENDP 1 */
[ 0] = { .submit = 32, .complete = 93},
[ 1] = { .submit = 34, .complete = 94},
......@@ -186,7 +187,7 @@ static struct chan_queues usb_queues_tx[] = {
[29] = { .submit = 90, .complete = 139},
};
static const struct chan_queues usb_queues_rx[] = {
static const struct chan_queues am335x_usb_queues_rx[] = {
/* USB0 ENDP 1 */
[ 0] = { .submit = 1, .complete = 109},
[ 1] = { .submit = 2, .complete = 110},
......@@ -222,11 +223,26 @@ static const struct chan_queues usb_queues_rx[] = {
[29] = { .submit = 30, .complete = 155},
};
static const struct chan_queues da8xx_usb_queues_tx[] = {
[0] = { .submit = 16, .complete = 24},
[1] = { .submit = 18, .complete = 24},
[2] = { .submit = 20, .complete = 24},
[3] = { .submit = 22, .complete = 24},
};
static const struct chan_queues da8xx_usb_queues_rx[] = {
[0] = { .submit = 1, .complete = 26},
[1] = { .submit = 3, .complete = 26},
[2] = { .submit = 5, .complete = 26},
[3] = { .submit = 7, .complete = 26},
};
struct cppi_glue_infos {
irqreturn_t (*isr)(int irq, void *data);
const struct chan_queues *queues_rx;
const struct chan_queues *queues_tx;
struct chan_queues td_queue;
u16 first_completion_queue;
u16 qmgr_num_pend;
};
static struct cppi41_channel *to_cpp41_chan(struct dma_chan *c)
......@@ -285,19 +301,21 @@ static u32 cppi41_pop_desc(struct cppi41_dd *cdd, unsigned queue_num)
static irqreturn_t cppi41_irq(int irq, void *data)
{
struct cppi41_dd *cdd = data;
u16 first_completion_queue = cdd->first_completion_queue;
u16 qmgr_num_pend = cdd->qmgr_num_pend;
struct cppi41_channel *c;
int i;
for (i = QMGR_PENDING_SLOT_Q(FIST_COMPLETION_QUEUE); i < QMGR_NUM_PEND;
for (i = QMGR_PENDING_SLOT_Q(first_completion_queue); i < qmgr_num_pend;
i++) {
u32 val;
u32 q_num;
val = cppi_readl(cdd->qmgr_mem + QMGR_PEND(i));
if (i == QMGR_PENDING_SLOT_Q(FIST_COMPLETION_QUEUE) && val) {
if (i == QMGR_PENDING_SLOT_Q(first_completion_queue) && val) {
u32 mask;
/* set corresponding bit for completion Q 93 */
mask = 1 << QMGR_PENDING_BIT_Q(FIST_COMPLETION_QUEUE);
mask = 1 << QMGR_PENDING_BIT_Q(first_completion_queue);
/* now set all bits for queues less than Q 93 */
mask--;
/* now invert and keep only Q 93+ set */
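A worked example of the pending-mask arithmetic above, assuming first_completion_queue = 93 (the AM335x value) and QMGR_PENDING_BIT_Q(x) == x % 32; the invert-and-mask step itself sits in the elided part of the hunk:

	/* queue 93 sits in pending slot 93 / 32 = 2, at bit 93 % 32 = 29 */
	mask = 1 << 29;	/* 0x20000000: bit for queue 93         */
	mask--;		/* 0x1fffffff: bits for queues 64..92    */
	mask = ~mask;	/* 0xe0000000: keep only queues 93..95   */
	val &= mask;	/* drop pending state below queue 93     */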
......@@ -402,11 +420,9 @@ static enum dma_status cppi41_dma_tx_status(struct dma_chan *chan,
struct cppi41_channel *c = to_cpp41_chan(chan);
enum dma_status ret;
/* lock */
ret = dma_cookie_status(chan, cookie, txstate);
if (txstate && ret == DMA_COMPLETE)
txstate->residue = c->residue;
/* unlock */
dma_set_residue(txstate, c->residue);
return ret;
}
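dma_set_residue() folds in the NULL check on txstate that the removed lines did by hand; its definition in include/linux/dmaengine.h is essentially:

static inline void dma_set_residue(struct dma_tx_state *state, u32 residue)
{
	if (state)
		state->residue = residue;
}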
......@@ -630,7 +646,7 @@ static int cppi41_tear_down_chan(struct cppi41_channel *c)
if (!c->is_tx) {
reg |= GCR_STARV_RETRY;
reg |= GCR_DESC_TYPE_HOST;
reg |= c->q_comp_num;
reg |= cdd->td_queue.complete;
}
reg |= GCR_TEARDOWN;
cppi_writel(reg, c->gcr_reg);
......@@ -641,7 +657,7 @@ static int cppi41_tear_down_chan(struct cppi41_channel *c)
if (!c->td_seen || !c->td_desc_seen) {
desc_phys = cppi41_pop_desc(cdd, cdd->td_queue.complete);
if (!desc_phys)
if (!desc_phys && c->is_tx)
desc_phys = cppi41_pop_desc(cdd, c->q_comp_num);
if (desc_phys == c->desc_phys) {
......@@ -723,39 +739,24 @@ static int cppi41_stop_chan(struct dma_chan *chan)
return 0;
}
static void cleanup_chans(struct cppi41_dd *cdd)
{
while (!list_empty(&cdd->ddev.channels)) {
struct cppi41_channel *cchan;
cchan = list_first_entry(&cdd->ddev.channels,
struct cppi41_channel, chan.device_node);
list_del(&cchan->chan.device_node);
kfree(cchan);
}
}
static int cppi41_add_chans(struct device *dev, struct cppi41_dd *cdd)
{
struct cppi41_channel *cchan;
struct cppi41_channel *cchan, *chans;
int i;
int ret;
u32 n_chans;
u32 n_chans = cdd->n_chans;
ret = of_property_read_u32(dev->of_node, "#dma-channels",
&n_chans);
if (ret)
return ret;
/*
* The channels can only be used as TX or as RX. So we allocate twice
* as many DMA channels, because USB can only do RX or TX.
*/
n_chans *= 2;
chans = devm_kcalloc(dev, n_chans, sizeof(*chans), GFP_KERNEL);
if (!chans)
return -ENOMEM;
for (i = 0; i < n_chans; i++) {
cchan = kzalloc(sizeof(*cchan), GFP_KERNEL);
if (!cchan)
goto err;
cchan = &chans[i];
cchan->cdd = cdd;
if (i & 1) {
......@@ -775,9 +776,6 @@ static int cppi41_add_chans(struct device *dev, struct cppi41_dd *cdd)
cdd->first_td_desc = n_chans;
return 0;
err:
cleanup_chans(cdd);
return -ENOMEM;
}
static void purge_descs(struct device *dev, struct cppi41_dd *cdd)
......@@ -859,7 +857,7 @@ static void init_sched(struct cppi41_dd *cdd)
word = 0;
cppi_writel(0, cdd->sched_mem + DMA_SCHED_CTRL);
for (ch = 0; ch < 15 * 2; ch += 2) {
for (ch = 0; ch < cdd->n_chans; ch += 2) {
reg = SCHED_ENTRY0_CHAN(ch);
reg |= SCHED_ENTRY1_CHAN(ch) | SCHED_ENTRY1_IS_RX;
......@@ -869,7 +867,7 @@ static void init_sched(struct cppi41_dd *cdd)
cppi_writel(reg, cdd->sched_mem + DMA_SCHED_WORD(word));
word++;
}
reg = 15 * 2 * 2 - 1;
reg = cdd->n_chans * 2 - 1;
reg |= DMA_SCHED_CTRL_EN;
cppi_writel(reg, cdd->sched_mem + DMA_SCHED_CTRL);
}
......@@ -885,7 +883,7 @@ static int init_cppi41(struct device *dev, struct cppi41_dd *cdd)
return -ENOMEM;
cppi_writel(cdd->scratch_phys, cdd->qmgr_mem + QMGR_LRAM0_BASE);
cppi_writel(QMGR_SCRATCH_SIZE, cdd->qmgr_mem + QMGR_LRAM_SIZE);
cppi_writel(TOTAL_DESCS_NUM, cdd->qmgr_mem + QMGR_LRAM_SIZE);
cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE);
ret = init_descs(dev, cdd);
......@@ -894,6 +892,7 @@ static int init_cppi41(struct device *dev, struct cppi41_dd *cdd)
cppi_writel(cdd->td_queue.submit, cdd->ctrl_mem + DMA_TDFDQ);
init_sched(cdd);
return 0;
err_td:
deinit_cppi41(dev, cdd);
......@@ -933,8 +932,9 @@ static bool cpp41_dma_filter_fn(struct dma_chan *chan, void *param)
else
queues = cdd->queues_rx;
BUILD_BUG_ON(ARRAY_SIZE(usb_queues_rx) != ARRAY_SIZE(usb_queues_tx));
if (WARN_ON(cchan->port_num > ARRAY_SIZE(usb_queues_rx)))
BUILD_BUG_ON(ARRAY_SIZE(am335x_usb_queues_rx) !=
ARRAY_SIZE(am335x_usb_queues_tx));
if (WARN_ON(cchan->port_num > ARRAY_SIZE(am335x_usb_queues_rx)))
return false;
cchan->q_num = queues[cchan->port_num].submit;
......@@ -962,15 +962,25 @@ static struct dma_chan *cppi41_dma_xlate(struct of_phandle_args *dma_spec,
&dma_spec->args[0]);
}
static const struct cppi_glue_infos usb_infos = {
.isr = cppi41_irq,
.queues_rx = usb_queues_rx,
.queues_tx = usb_queues_tx,
static const struct cppi_glue_infos am335x_usb_infos = {
.queues_rx = am335x_usb_queues_rx,
.queues_tx = am335x_usb_queues_tx,
.td_queue = { .submit = 31, .complete = 0 },
.first_completion_queue = 93,
.qmgr_num_pend = 5,
};
static const struct cppi_glue_infos da8xx_usb_infos = {
.queues_rx = da8xx_usb_queues_rx,
.queues_tx = da8xx_usb_queues_tx,
.td_queue = { .submit = 31, .complete = 0 },
.first_completion_queue = 24,
.qmgr_num_pend = 2,
};
static const struct of_device_id cppi41_dma_ids[] = {
{ .compatible = "ti,am3359-cppi41", .data = &usb_infos},
{ .compatible = "ti,am3359-cppi41", .data = &am335x_usb_infos},
{ .compatible = "ti,da830-cppi41", .data = &da8xx_usb_infos},
{},
};
MODULE_DEVICE_TABLE(of, cppi41_dma_ids);
......@@ -995,6 +1005,8 @@ static int cppi41_dma_probe(struct platform_device *pdev)
struct cppi41_dd *cdd;
struct device *dev = &pdev->dev;
const struct cppi_glue_infos *glue_info;
struct resource *mem;
int index;
int irq;
int ret;
......@@ -1021,19 +1033,31 @@ static int cppi41_dma_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&cdd->ddev.channels);
cpp41_dma_info.dma_cap = cdd->ddev.cap_mask;
cdd->usbss_mem = of_iomap(dev->of_node, 0);
cdd->ctrl_mem = of_iomap(dev->of_node, 1);
cdd->sched_mem = of_iomap(dev->of_node, 2);
cdd->qmgr_mem = of_iomap(dev->of_node, 3);
index = of_property_match_string(dev->of_node,
"reg-names", "controller");
if (index < 0)
return index;
mem = platform_get_resource(pdev, IORESOURCE_MEM, index);
cdd->ctrl_mem = devm_ioremap_resource(dev, mem);
if (IS_ERR(cdd->ctrl_mem))
return PTR_ERR(cdd->ctrl_mem);
mem = platform_get_resource(pdev, IORESOURCE_MEM, index + 1);
cdd->sched_mem = devm_ioremap_resource(dev, mem);
if (IS_ERR(cdd->sched_mem))
return PTR_ERR(cdd->sched_mem);
mem = platform_get_resource(pdev, IORESOURCE_MEM, index + 2);
cdd->qmgr_mem = devm_ioremap_resource(dev, mem);
if (IS_ERR(cdd->qmgr_mem))
return PTR_ERR(cdd->qmgr_mem);
spin_lock_init(&cdd->lock);
INIT_LIST_HEAD(&cdd->pending);
platform_set_drvdata(pdev, cdd);
if (!cdd->usbss_mem || !cdd->ctrl_mem || !cdd->sched_mem ||
!cdd->qmgr_mem)
return -ENXIO;
pm_runtime_enable(dev);
pm_runtime_set_autosuspend_delay(dev, 100);
pm_runtime_use_autosuspend(dev);
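Switching from of_iomap() to devm_ioremap_resource() here is what lets the later hunks drop the explicit NULL checks and the iounmap() calls from both the probe error path and cppi41_dma_remove(): the mapping is device-managed and released automatically at unbind, and failures come back as ERR_PTR values. The canonical pattern, as used above:

	mem = platform_get_resource(pdev, IORESOURCE_MEM, index);
	base = devm_ioremap_resource(dev, mem);	/* request + ioremap, devres-managed */
	if (IS_ERR(base))
		return PTR_ERR(base);		/* no iounmap() needed on any path */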
......@@ -1044,6 +1068,13 @@ static int cppi41_dma_probe(struct platform_device *pdev)
cdd->queues_rx = glue_info->queues_rx;
cdd->queues_tx = glue_info->queues_tx;
cdd->td_queue = glue_info->td_queue;
cdd->qmgr_num_pend = glue_info->qmgr_num_pend;
cdd->first_completion_queue = glue_info->first_completion_queue;
ret = of_property_read_u32(dev->of_node,
"#dma-channels", &cdd->n_chans);
if (ret)
goto err_get_n_chans;
ret = init_cppi41(dev, cdd);
if (ret)
......@@ -1056,18 +1087,18 @@ static int cppi41_dma_probe(struct platform_device *pdev)
irq = irq_of_parse_and_map(dev->of_node, 0);
if (!irq) {
ret = -EINVAL;
goto err_irq;
goto err_chans;
}
ret = devm_request_irq(&pdev->dev, irq, glue_info->isr, IRQF_SHARED,
ret = devm_request_irq(&pdev->dev, irq, cppi41_irq, IRQF_SHARED,
dev_name(dev), cdd);
if (ret)
goto err_irq;
goto err_chans;
cdd->irq = irq;
ret = dma_async_device_register(&cdd->ddev);
if (ret)
goto err_dma_reg;
goto err_chans;
ret = of_dma_controller_register(dev->of_node,
cppi41_dma_xlate, &cpp41_dma_info);
......@@ -1080,20 +1111,14 @@ static int cppi41_dma_probe(struct platform_device *pdev)
return 0;
err_of:
dma_async_device_unregister(&cdd->ddev);
err_dma_reg:
err_irq:
cleanup_chans(cdd);
err_chans:
deinit_cppi41(dev, cdd);
err_init_cppi:
pm_runtime_dont_use_autosuspend(dev);
err_get_n_chans:
err_get_sync:
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
iounmap(cdd->usbss_mem);
iounmap(cdd->ctrl_mem);
iounmap(cdd->sched_mem);
iounmap(cdd->qmgr_mem);
return ret;
}
......@@ -1110,12 +1135,7 @@ static int cppi41_dma_remove(struct platform_device *pdev)
dma_async_device_unregister(&cdd->ddev);
devm_free_irq(&pdev->dev, cdd->irq, cdd);
cleanup_chans(cdd);
deinit_cppi41(&pdev->dev, cdd);
iounmap(cdd->usbss_mem);
iounmap(cdd->ctrl_mem);
iounmap(cdd->sched_mem);
iounmap(cdd->qmgr_mem);
pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
......
......@@ -535,6 +535,13 @@ static int dmatest_func(void *data)
total_tests++;
/* Check if buffer count fits into map count variable (u8) */
if ((src_cnt + dst_cnt) >= 255) {
pr_err("too many buffers (%d of 255 supported)\n",
src_cnt + dst_cnt);
break;
}
if (1 << align > params->buf_size) {
pr_err("%u-byte buffer too small for %d-byte alignment\n",
params->buf_size, 1 << align);
......@@ -585,7 +592,7 @@ static int dmatest_func(void *data)
for (i = 0; i < src_cnt; i++) {
void *buf = thread->srcs[i];
struct page *pg = virt_to_page(buf);
unsigned pg_off = (unsigned long) buf & ~PAGE_MASK;
unsigned long pg_off = offset_in_page(buf);
um->addr[i] = dma_map_page(dev->dev, pg, pg_off,
um->len, DMA_TO_DEVICE);
......@@ -605,7 +612,7 @@ static int dmatest_func(void *data)
for (i = 0; i < dst_cnt; i++) {
void *buf = thread->dsts[i];
struct page *pg = virt_to_page(buf);
unsigned pg_off = (unsigned long) buf & ~PAGE_MASK;
unsigned long pg_off = offset_in_page(buf);
dsts[i] = dma_map_page(dev->dev, pg, pg_off, um->len,
DMA_BIDIRECTIONAL);
......
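The offset_in_page() conversions here (and in the mv_xor and dma-debug patches listed in the merge) keep behaviour identical while also widening pg_off to unsigned long; the helper is defined in include/linux/mm.h as:

#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)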
......@@ -937,6 +937,21 @@ static int sdma_disable_channel(struct dma_chan *chan)
return 0;
}
static int sdma_disable_channel_with_delay(struct dma_chan *chan)
{
sdma_disable_channel(chan);
/*
* According to the NXP R&D team, a delay of one BD (buffer descriptor)
* SDMA processing time (at most 1 ms) should be added after clearing
* the channel enable bit, to ensure the SDMA core has really stopped
* after SDMA clients call .device_terminate_all.
*/
mdelay(1);
return 0;
}
static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac)
{
struct sdma_engine *sdma = sdmac->sdma;
......@@ -1828,11 +1843,11 @@ static int sdma_probe(struct platform_device *pdev)
sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
sdma->dma_device.device_config = sdma_config;
sdma->dma_device.device_terminate_all = sdma_disable_channel;
sdma->dma_device.device_terminate_all = sdma_disable_channel_with_delay;
sdma->dma_device.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
sdma->dma_device.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
sdma->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
sdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
sdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
sdma->dma_device.device_issue_pending = sdma_issue_pending;
sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
dma_set_max_seg_size(sdma->dma_device.dev, 65535);
......
......@@ -760,9 +760,7 @@ ioat_init_channel(struct ioatdma_device *ioat_dma,
dma_cookie_init(&ioat_chan->dma_chan);
list_add_tail(&ioat_chan->dma_chan.device_node, &dma->channels);