Diffstat (limited to 'drivers/dma')
-rw-r--r--  drivers/dma/dmaengine.c |  7
-rw-r--r--  drivers/dma/dmatest.c   | 22
-rw-r--r--  drivers/dma/ioat_dma.c  | 16
-rw-r--r--  drivers/dma/iop-adma.c  | 27
-rw-r--r--  drivers/dma/iovlock.c   | 17
-rw-r--r--  drivers/dma/mv_xor.c    | 15
6 files changed, 66 insertions, 38 deletions
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index dc003a3a787..65799651737 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -388,7 +388,10 @@ int dma_async_device_register(struct dma_device *device)
init_completion(&device->done);
kref_init(&device->refcount);
+
+ mutex_lock(&dma_list_mutex);
device->dev_id = id++;
+ mutex_unlock(&dma_list_mutex);
/* represent channels in sysfs. Probably want devs too */
list_for_each_entry(chan, &device->channels, device_node) {
@@ -399,8 +402,8 @@ int dma_async_device_register(struct dma_device *device)
chan->chan_id = chancnt++;
chan->dev.class = &dma_devclass;
chan->dev.parent = device->dev;
- snprintf(chan->dev.bus_id, BUS_ID_SIZE, "dma%dchan%d",
- device->dev_id, chan->chan_id);
+ dev_set_name(&chan->dev, "dma%dchan%d",
+ device->dev_id, chan->chan_id);
rc = device_register(&chan->dev);
if (rc) {
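
Note: the dmaengine.c hunks take dma_list_mutex around the dev_id assignment so that concurrent registrations cannot hand out the same id, and switch channel naming from snprintf() into the fixed-size bus_id[] field to dev_set_name(), which lets the driver core own the string. A minimal sketch of that naming pattern follows; the example_* wrapper is hypothetical, only dev_set_name(), dev_name() and device_register() are real driver-core calls.

	#include <linux/device.h>

	static int example_register_chan(struct device *parent, struct device *dev,
					 int dev_id, int chan_id)
	{
		dev->parent = parent;
		/* the driver core allocates and stores the name, so there is
		 * no fixed-size bus_id[] buffer to overrun */
		dev_set_name(dev, "dma%dchan%d", dev_id, chan_id);
		/* consumers read it back with dev_name(dev), as dmatest.c
		 * does below */
		return device_register(dev);
	}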
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index d1e381e35a9..ed9636bfb54 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -20,11 +20,11 @@ static unsigned int test_buf_size = 16384;
module_param(test_buf_size, uint, S_IRUGO);
MODULE_PARM_DESC(test_buf_size, "Size of the memcpy test buffer");
-static char test_channel[BUS_ID_SIZE];
+static char test_channel[20];
module_param_string(channel, test_channel, sizeof(test_channel), S_IRUGO);
MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");
-static char test_device[BUS_ID_SIZE];
+static char test_device[20];
module_param_string(device, test_device, sizeof(test_device), S_IRUGO);
MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");
@@ -80,14 +80,14 @@ static bool dmatest_match_channel(struct dma_chan *chan)
{
if (test_channel[0] == '\0')
return true;
- return strcmp(chan->dev.bus_id, test_channel) == 0;
+ return strcmp(dev_name(&chan->dev), test_channel) == 0;
}
static bool dmatest_match_device(struct dma_device *device)
{
if (test_device[0] == '\0')
return true;
- return strcmp(device->dev->bus_id, test_device) == 0;
+ return strcmp(dev_name(device->dev), test_device) == 0;
}
static unsigned long dmatest_random(void)
@@ -332,7 +332,7 @@ static enum dma_state_client dmatest_add_channel(struct dma_chan *chan)
dtc = kmalloc(sizeof(struct dmatest_chan), GFP_KERNEL);
if (!dtc) {
- pr_warning("dmatest: No memory for %s\n", chan->dev.bus_id);
+ pr_warning("dmatest: No memory for %s\n", dev_name(&chan->dev));
return DMA_NAK;
}
@@ -343,16 +343,16 @@ static enum dma_state_client dmatest_add_channel(struct dma_chan *chan)
thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL);
if (!thread) {
pr_warning("dmatest: No memory for %s-test%u\n",
- chan->dev.bus_id, i);
+ dev_name(&chan->dev), i);
break;
}
thread->chan = dtc->chan;
smp_wmb();
thread->task = kthread_run(dmatest_func, thread, "%s-test%u",
- chan->dev.bus_id, i);
+ dev_name(&chan->dev), i);
if (IS_ERR(thread->task)) {
pr_warning("dmatest: Failed to run thread %s-test%u\n",
- chan->dev.bus_id, i);
+ dev_name(&chan->dev), i);
kfree(thread);
break;
}
@@ -362,7 +362,7 @@ static enum dma_state_client dmatest_add_channel(struct dma_chan *chan)
list_add_tail(&thread->node, &dtc->threads);
}
- pr_info("dmatest: Started %u threads using %s\n", i, chan->dev.bus_id);
+ pr_info("dmatest: Started %u threads using %s\n", i, dev_name(&chan->dev));
list_add_tail(&dtc->node, &dmatest_channels);
nr_channels++;
@@ -379,7 +379,7 @@ static enum dma_state_client dmatest_remove_channel(struct dma_chan *chan)
list_del(&dtc->node);
dmatest_cleanup_channel(dtc);
pr_debug("dmatest: lost channel %s\n",
- chan->dev.bus_id);
+ dev_name(&chan->dev));
return DMA_ACK;
}
}
@@ -418,7 +418,7 @@ dmatest_event(struct dma_client *client, struct dma_chan *chan,
default:
pr_info("dmatest: Unhandled event %u (%s)\n",
- state, chan->dev.bus_id);
+ state, dev_name(&chan->dev));
break;
}
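
Note: dmatest.c moves every chan->dev.bus_id reference over to dev_name(&chan->dev) and shrinks the channel/device filter buffers from BUS_ID_SIZE to 20 bytes. A sketch of that filter pattern with a hypothetical parameter (filter_name) and helper; module_param_string(), dev_name() and strcmp() are the only kernel APIs assumed.

	#include <linux/device.h>
	#include <linux/module.h>
	#include <linux/string.h>
	#include <linux/types.h>

	static char filter_name[20];
	module_param_string(name, filter_name, sizeof(filter_name), S_IRUGO);
	MODULE_PARM_DESC(name, "Name of the device to match (default: any)");

	/* an empty string matches everything; otherwise compare against the
	 * name held by the driver core for this device */
	static bool example_match(struct device *dev)
	{
		if (filter_name[0] == '\0')
			return true;
		return strcmp(dev_name(dev), filter_name) == 0;
	}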
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c
index b0438c4f0c3..6607fdd00b1 100644
--- a/drivers/dma/ioat_dma.c
+++ b/drivers/dma/ioat_dma.c
@@ -525,7 +525,7 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
}
hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
- if (new->async_tx.callback) {
+ if (first->async_tx.callback) {
hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
if (first != new) {
/* move callback into to last desc */
@@ -617,7 +617,7 @@ static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx)
}
hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
- if (new->async_tx.callback) {
+ if (first->async_tx.callback) {
hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
if (first != new) {
/* move callback into to last desc */
@@ -807,6 +807,12 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan)
struct ioat_desc_sw *desc, *_desc;
int in_use_descs = 0;
+ /* Before freeing channel resources first check
+ * if they have been previously allocated for this channel.
+ */
+ if (ioat_chan->desccount == 0)
+ return;
+
tasklet_disable(&ioat_chan->cleanup_task);
ioat_dma_memcpy_cleanup(ioat_chan);
@@ -869,6 +875,7 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan)
ioat_chan->last_completion = ioat_chan->completion_addr = 0;
ioat_chan->pending = 0;
ioat_chan->dmacount = 0;
+ ioat_chan->desccount = 0;
ioat_chan->watchdog_completion = 0;
ioat_chan->last_compl_desc_addr_hw = 0;
ioat_chan->watchdog_tcp_cookie =
@@ -1334,10 +1341,12 @@ static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
*/
#define IOAT_TEST_SIZE 2000
+DECLARE_COMPLETION(test_completion);
static void ioat_dma_test_callback(void *dma_async_param)
{
printk(KERN_ERR "ioatdma: ioat_dma_test_callback(%p)\n",
dma_async_param);
+ complete(&test_completion);
}
/**
@@ -1403,7 +1412,8 @@ static int ioat_dma_self_test(struct ioatdma_device *device)
goto free_resources;
}
device->common.device_issue_pending(dma_chan);
- msleep(1);
+
+ wait_for_completion_timeout(&test_completion, msecs_to_jiffies(3000));
if (device->common.device_is_tx_complete(dma_chan, cookie, NULL, NULL)
!= DMA_SUCCESS) {
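
Note: the ioat_dma.c hunks check the callback on the first descriptor rather than the last, guard free_chan_resources() against channels whose descriptors were never allocated, and stop the self-test guessing with msleep(1): the test callback now signals a completion and the submitter waits on it with a 3 second timeout before polling device_is_tx_complete(). A minimal sketch of that completion pattern with hypothetical example_* names; DECLARE_COMPLETION(), complete(), wait_for_completion_timeout() and msecs_to_jiffies() are real kernel APIs.

	#include <linux/completion.h>
	#include <linux/jiffies.h>
	#include <linux/types.h>

	static DECLARE_COMPLETION(example_done);

	/* runs from the engine's descriptor cleanup path once the copy lands */
	static void example_callback(void *param)
	{
		complete(&example_done);
	}

	/* submitter side: kick the channel, then block until the callback
	 * fires or three seconds pass (returns 0 on timeout) */
	static bool example_wait_for_copy(void)
	{
		return wait_for_completion_timeout(&example_done,
						   msecs_to_jiffies(3000)) != 0;
	}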
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index 71fba82462c..6be31726220 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -85,18 +85,28 @@ iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
enum dma_ctrl_flags flags = desc->async_tx.flags;
u32 src_cnt;
dma_addr_t addr;
+ dma_addr_t dest;
+ src_cnt = unmap->unmap_src_cnt;
+ dest = iop_desc_get_dest_addr(unmap, iop_chan);
if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
- addr = iop_desc_get_dest_addr(unmap, iop_chan);
- dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE);
+ enum dma_data_direction dir;
+
+ if (src_cnt > 1) /* is xor? */
+ dir = DMA_BIDIRECTIONAL;
+ else
+ dir = DMA_FROM_DEVICE;
+
+ dma_unmap_page(dev, dest, len, dir);
}
if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
- src_cnt = unmap->unmap_src_cnt;
while (src_cnt--) {
addr = iop_desc_get_src_addr(unmap,
iop_chan,
src_cnt);
+ if (addr == dest)
+ continue;
dma_unmap_page(dev, addr, len,
DMA_TO_DEVICE);
}
@@ -411,6 +421,7 @@ iop_adma_tx_submit(struct dma_async_tx_descriptor *tx)
int slot_cnt;
int slots_per_op;
dma_cookie_t cookie;
+ dma_addr_t next_dma;
grp_start = sw_desc->group_head;
slot_cnt = grp_start->slot_cnt;
@@ -425,12 +436,12 @@ iop_adma_tx_submit(struct dma_async_tx_descriptor *tx)
&old_chain_tail->chain_node);
/* fix up the hardware chain */
- iop_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys);
+ next_dma = grp_start->async_tx.phys;
+ iop_desc_set_next_desc(old_chain_tail, next_dma);
+ BUG_ON(iop_desc_get_next_desc(old_chain_tail) != next_dma); /* flush */
- /* 1/ don't add pre-chained descriptors
- * 2/ dummy read to flush next_desc write
- */
- BUG_ON(iop_desc_get_next_desc(sw_desc));
+ /* check for pre-chained descriptors */
+ iop_paranoia(iop_desc_get_next_desc(sw_desc));
/* increment the pending count by the number of slots
* memcpy operations have a 1:1 (slot:operation) relation
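
Note: both ADMA drivers in this diff (iop-adma.c here, mv_xor.c below) get the same unmap fix: with more than one source the operation is an xor, whose destination is also read by the engine, so it is unmapped DMA_BIDIRECTIONAL, and any source whose bus address equals the destination is skipped to avoid unmapping the page twice. A hedged sketch of that rule; example_unmap() and its arguments are hypothetical, only dma_unmap_page() and the dma_data_direction values are real.

	#include <linux/dma-mapping.h>

	static void example_unmap(struct device *dev, dma_addr_t dest,
				  dma_addr_t *src, int src_cnt, size_t len)
	{
		/* a multi-source (xor) destination was both read and written */
		enum dma_data_direction dir =
			src_cnt > 1 ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
		int i;

		dma_unmap_page(dev, dest, len, dir);

		for (i = 0; i < src_cnt; i++) {
			/* sources aliasing the destination were already
			 * unmapped above */
			if (src[i] == dest)
				continue;
			dma_unmap_page(dev, src[i], len, DMA_TO_DEVICE);
		}
	}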
diff --git a/drivers/dma/iovlock.c b/drivers/dma/iovlock.c
index e763d723e4c..9f6fe46a9b8 100644
--- a/drivers/dma/iovlock.c
+++ b/drivers/dma/iovlock.c
@@ -55,7 +55,6 @@ struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len)
int nr_iovecs = 0;
int iovec_len_used = 0;
int iovec_pages_used = 0;
- long err;
/* don't pin down non-user-based iovecs */
if (segment_eq(get_fs(), KERNEL_DS))
@@ -72,23 +71,21 @@ struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len)
local_list = kmalloc(sizeof(*local_list)
+ (nr_iovecs * sizeof (struct dma_page_list))
+ (iovec_pages_used * sizeof (struct page*)), GFP_KERNEL);
- if (!local_list) {
- err = -ENOMEM;
+ if (!local_list)
goto out;
- }
/* list of pages starts right after the page list array */
pages = (struct page **) &local_list->page_list[nr_iovecs];
+ local_list->nr_iovecs = 0;
+
for (i = 0; i < nr_iovecs; i++) {
struct dma_page_list *page_list = &local_list->page_list[i];
len -= iov[i].iov_len;
- if (!access_ok(VERIFY_WRITE, iov[i].iov_base, iov[i].iov_len)) {
- err = -EFAULT;
+ if (!access_ok(VERIFY_WRITE, iov[i].iov_base, iov[i].iov_len))
goto unpin;
- }
page_list->nr_pages = num_pages_spanned(&iov[i]);
page_list->base_address = iov[i].iov_base;
@@ -109,10 +106,8 @@ struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len)
NULL);
up_read(&current->mm->mmap_sem);
- if (ret != page_list->nr_pages) {
- err = -ENOMEM;
+ if (ret != page_list->nr_pages)
goto unpin;
- }
local_list->nr_iovecs = i + 1;
}
@@ -122,7 +117,7 @@ struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len)
unpin:
dma_unpin_iovec_pages(local_list);
out:
- return ERR_PTR(err);
+ return NULL;
}
void dma_unpin_iovec_pages(struct dma_pinned_list *pinned_list)
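
Note: the iovlock.c hunks change the failure convention of dma_pin_iovec_pages(): it now returns NULL on any error instead of an ERR_PTR() value, and nr_iovecs is zeroed before the pin loop so unpinning a partially built list is safe. A sketch of a hypothetical caller under the new convention; only dma_pin_iovec_pages() and dma_unpin_iovec_pages() from this file are assumed.

	#include <linux/uio.h>

	static int example_use(struct iovec *iov, size_t len)
	{
		struct dma_pinned_list *pinned_list;

		/* NULL simply means the pages could not (or should not) be
		 * pinned; no IS_ERR()/PTR_ERR() decoding is needed */
		pinned_list = dma_pin_iovec_pages(iov, len);
		if (!pinned_list)
			return -ENOMEM;	/* or fall back to a plain copy */

		/* ... hand pinned_list to the dma memcpy helpers ... */

		dma_unpin_iovec_pages(pinned_list);
		return 0;
	}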
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 0328da020a1..bcda1742641 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -311,17 +311,26 @@ mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
enum dma_ctrl_flags flags = desc->async_tx.flags;
u32 src_cnt;
dma_addr_t addr;
+ dma_addr_t dest;
+ src_cnt = unmap->unmap_src_cnt;
+ dest = mv_desc_get_dest_addr(unmap);
if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
- addr = mv_desc_get_dest_addr(unmap);
- dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE);
+ enum dma_data_direction dir;
+
+ if (src_cnt > 1) /* is xor ? */
+ dir = DMA_BIDIRECTIONAL;
+ else
+ dir = DMA_FROM_DEVICE;
+ dma_unmap_page(dev, dest, len, dir);
}
if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
- src_cnt = unmap->unmap_src_cnt;
while (src_cnt--) {
addr = mv_desc_get_src_addr(unmap,
src_cnt);
+ if (addr == dest)
+ continue;
dma_unmap_page(dev, addr, len,
DMA_TO_DEVICE);
}