dmaengine, async_tx: support alignment checks
Some engines have transfer size and address alignment restrictions.  Add
a per-operation alignment property to struct dma_device that the async
routines and dmatest can use to check alignment capabilities.

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
djbw committed Sep 9, 2009
1 parent 9308add commit 83544ae
Showing 6 changed files with 67 additions and 6 deletions.
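Since the commit message is terse, a brief illustration may help: each new field holds an alignment shift (log2 of the required byte alignment), where 0 means no restriction. Below is a minimal sketch of how an engine driver might advertise an 8-byte memcpy restriction; the function and driver name are hypothetical, and only the struct dma_device fields come from this commit.

	/* hypothetical driver setup, illustrative only */
	static void mydrv_setup_dma(struct dma_device *dma)
	{
		dma_cap_set(DMA_MEMCPY, dma->cap_mask);
		dma->copy_align = 3;	/* 2^3: offsets and len must be 8-byte multiples */
		dma->xor_align = 0;	/* 0: no restriction on xor operands */
	}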
2 changes: 1 addition & 1 deletion crypto/async_tx/async_memcpy.c
@@ -50,7 +50,7 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
 	struct dma_device *device = chan ? chan->device : NULL;
 	struct dma_async_tx_descriptor *tx = NULL;
 
-	if (device) {
+	if (device && is_dma_copy_aligned(device, src_offset, dest_offset, len)) {
 		dma_addr_t dma_dest, dma_src;
 		unsigned long dma_prep_flags = 0;
 
2 changes: 1 addition & 1 deletion crypto/async_tx/async_memset.c
@@ -47,7 +47,7 @@ async_memset(struct page *dest, int val, unsigned int offset, size_t len,
 	struct dma_device *device = chan ? chan->device : NULL;
 	struct dma_async_tx_descriptor *tx = NULL;
 
-	if (device) {
+	if (device && is_dma_fill_aligned(device, offset, 0, len)) {
 		dma_addr_t dma_dest;
 		unsigned long dma_prep_flags = 0;
 
6 changes: 4 additions & 2 deletions crypto/async_tx/async_pq.c
@@ -211,7 +211,8 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
 
 	if (dma_src && device &&
 	    (src_cnt <= dma_maxpq(device, 0) ||
-	     dma_maxpq(device, DMA_PREP_CONTINUE) > 0)) {
+	     dma_maxpq(device, DMA_PREP_CONTINUE) > 0) &&
+	    is_dma_pq_aligned(device, offset, 0, len)) {
 		/* run the p+q asynchronously */
 		pr_debug("%s: (async) disks: %d len: %zu\n",
 			 __func__, disks, len);
@@ -274,7 +275,8 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
 	else if (sizeof(dma_addr_t) <= sizeof(struct page *))
 		dma_src = (dma_addr_t *) blocks;
 
-	if (dma_src && device && disks <= dma_maxpq(device, 0)) {
+	if (dma_src && device && disks <= dma_maxpq(device, 0) &&
+	    is_dma_pq_aligned(device, offset, 0, len)) {
 		struct device *dev = device->dev;
 		dma_addr_t *pq = &dma_src[disks-2];
 		int i;
5 changes: 3 additions & 2 deletions crypto/async_tx/async_xor.c
@@ -193,7 +193,7 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
 	else if (sizeof(dma_addr_t) <= sizeof(struct page *))
 		dma_src = (dma_addr_t *) src_list;
 
-	if (dma_src && chan) {
+	if (dma_src && chan && is_dma_xor_aligned(chan->device, offset, 0, len)) {
 		/* run the xor asynchronously */
 		pr_debug("%s (async): len: %zu\n", __func__, len);
 
@@ -265,7 +265,8 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
 	else if (sizeof(dma_addr_t) <= sizeof(struct page *))
 		dma_src = (dma_addr_t *) src_list;
 
-	if (dma_src && device && src_cnt <= device->max_xor) {
+	if (dma_src && device && src_cnt <= device->max_xor &&
+	    is_dma_xor_aligned(device, offset, 0, len)) {
 		unsigned long dma_prep_flags = 0;
 		int i;
 
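All four async_tx call sites above follow the same gate pattern: the hardware path is taken only when a channel is present and the operation passes the new alignment check; otherwise the routine drops through to its existing synchronous software fallback. Schematically (a paraphrase of the pattern, not a verbatim kernel excerpt):

	if (device && is_dma_copy_aligned(device, src_offset, dest_offset, len)) {
		/* map the pages and issue device->device_prep_dma_memcpy() */
	} else {
		/* run the operation on the cpu, e.g. kmap + memcpy */
	}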
14 changes: 14 additions & 0 deletions drivers/dma/dmatest.c
@@ -288,13 +288,26 @@ static int dmatest_func(void *data)
 		dma_addr_t dma_dsts[dst_cnt];
 		struct completion cmp;
 		unsigned long tmo = msecs_to_jiffies(3000);
+		u8 align = 0;
 
 		total_tests++;
 
 		len = dmatest_random() % test_buf_size + 1;
 		src_off = dmatest_random() % (test_buf_size - len + 1);
 		dst_off = dmatest_random() % (test_buf_size - len + 1);
 
+		/* honor alignment restrictions */
+		if (thread->type == DMA_MEMCPY)
+			align = dev->copy_align;
+		else if (thread->type == DMA_XOR)
+			align = dev->xor_align;
+		else if (thread->type == DMA_PQ)
+			align = dev->pq_align;
+
+		len = (len >> align) << align;
+		src_off = (src_off >> align) << align;
+		dst_off = (dst_off >> align) << align;
+
 		dmatest_init_srcs(thread->srcs, src_off, len);
 		dmatest_init_dsts(thread->dsts, dst_off, len);

@@ -311,6 +324,7 @@ static int dmatest_func(void *data)
 					     DMA_BIDIRECTIONAL);
 		}
 
+
 		if (thread->type == DMA_MEMCPY)
 			tx = dev->device_prep_dma_memcpy(chan,
 							 dma_dsts[0] + dst_off,
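The dmatest change rounds the randomized length and offsets down to the engine's boundary with (x >> align) << align, which clears the low align bits, equivalent to x & ~((1 << align) - 1). A standalone userspace sketch of that arithmetic (not kernel code; the values are made-up examples):

	#include <stdio.h>

	int main(void)
	{
		unsigned int align = 3;	/* e.g. a copy_align of 3 = 8-byte boundary */
		unsigned int len = 1021, off = 37;

		/* shifting right then left rounds down to a multiple of 1 << align */
		printf("%u -> %u\n", len, (len >> align) << align);	/* 1021 -> 1016 */
		printf("%u -> %u\n", off, (off >> align) << align);	/* 37 -> 32 */
		return 0;
	}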
44 changes: 44 additions & 0 deletions include/linux/dmaengine.h
@@ -245,6 +245,10 @@ struct dma_async_tx_descriptor {
  * @cap_mask: one or more dma_capability flags
  * @max_xor: maximum number of xor sources, 0 if no capability
  * @max_pq: maximum number of PQ sources and PQ-continue capability
+ * @copy_align: alignment shift for memcpy operations
+ * @xor_align: alignment shift for xor operations
+ * @pq_align: alignment shift for pq operations
+ * @fill_align: alignment shift for memset operations
  * @dev_id: unique device ID
  * @dev: struct device reference for dma mapping api
  * @device_alloc_chan_resources: allocate resources and return the
@@ -271,6 +275,10 @@ struct dma_device {
 	dma_cap_mask_t cap_mask;
 	unsigned short max_xor;
 	unsigned short max_pq;
+	u8 copy_align;
+	u8 xor_align;
+	u8 pq_align;
+	u8 fill_align;
 	#define DMA_HAS_PQ_CONTINUE (1 << 15)
 
 	int dev_id;
@@ -314,6 +322,42 @@ struct dma_device {
 	void (*device_issue_pending)(struct dma_chan *chan);
 };
 
+static inline bool dmaengine_check_align(u8 align, size_t off1, size_t off2, size_t len)
+{
+	size_t mask;
+
+	if (!align)
+		return true;
+	mask = (1 << align) - 1;
+	if (mask & (off1 | off2 | len))
+		return false;
+	return true;
+}
+
+static inline bool is_dma_copy_aligned(struct dma_device *dev, size_t off1,
+				       size_t off2, size_t len)
+{
+	return dmaengine_check_align(dev->copy_align, off1, off2, len);
+}
+
+static inline bool is_dma_xor_aligned(struct dma_device *dev, size_t off1,
+				      size_t off2, size_t len)
+{
+	return dmaengine_check_align(dev->xor_align, off1, off2, len);
+}
+
+static inline bool is_dma_pq_aligned(struct dma_device *dev, size_t off1,
+				     size_t off2, size_t len)
+{
+	return dmaengine_check_align(dev->pq_align, off1, off2, len);
+}
+
+static inline bool is_dma_fill_aligned(struct dma_device *dev, size_t off1,
+				       size_t off2, size_t len)
+{
+	return dmaengine_check_align(dev->fill_align, off1, off2, len);
+}
+
 static inline void
 dma_set_maxpq(struct dma_device *dma, int maxpq, int has_pq_continue)
 {
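To make the helper semantics concrete: dmaengine_check_align() treats a shift of 0 as "no restriction", and otherwise requires both offsets and the length to be multiples of 1 << align. A userspace mirror of the check (a sketch for illustration; only dmaengine_check_align itself is from this commit):

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	/* same logic as dmaengine_check_align() above */
	static bool check_align(unsigned char align, size_t off1, size_t off2, size_t len)
	{
		size_t mask;

		if (!align)
			return true;	/* 0 means the engine imposes no restriction */
		mask = ((size_t)1 << align) - 1;
		return !(mask & (off1 | off2 | len));	/* low bits of all three must be clear */
	}

	int main(void)
	{
		printf("%d\n", check_align(2, 0, 8, 64));	/* 1: all 4-byte aligned */
		printf("%d\n", check_align(2, 0, 8, 62));	/* 0: len 62 is not */
		return 0;
	}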
