author    Lachlan McIlroy <lachlan@redback.melbourne.sgi.com>  2008-02-18 03:51:42 +0100
committer Lachlan McIlroy <lachlan@redback.melbourne.sgi.com>  2008-02-18 03:51:42 +0100
commit    c58310bf4933986513020fa90b4190c7492995ae (patch)
tree      143f2c7578d02ebef5db8fc57ae69e951ae0e2ee /crypto
parent    [XFS] Added quota targets and removed dmapi directory (diff)
parent    Merge branch 'upstream-linus' of git://git.kernel.org/pub/scm/linux/kernel/gi... (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6 into for-linus
Diffstat (limited to 'crypto')
-rw-r--r--  crypto/async_tx/async_memcpy.c |  38
-rw-r--r--  crypto/async_tx/async_memset.c |  28
-rw-r--r--  crypto/async_tx/async_tx.c     |   9
-rw-r--r--  crypto/async_tx/async_xor.c    | 124
-rw-r--r--  crypto/cbc.c                   |   2
-rw-r--r--  crypto/cryptd.c                |   4
-rw-r--r--  crypto/ecb.c                   |   2
-rw-r--r--  crypto/hmac.c                  |   2
-rw-r--r--  crypto/lrw.c                   |   2
-rw-r--r--  crypto/pcbc.c                  |   2
-rw-r--r--  crypto/xcbc.c                  |   2
11 files changed, 118 insertions(+), 97 deletions(-)
diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c
index 047e533fcc5b..0f6282207b32 100644
--- a/crypto/async_tx/async_memcpy.c
+++ b/crypto/async_tx/async_memcpy.c
@@ -35,7 +35,7 @@
* @src: src page
* @offset: offset in pages to start transaction
* @len: length in bytes
- * @flags: ASYNC_TX_ASSUME_COHERENT, ASYNC_TX_ACK, ASYNC_TX_DEP_ACK,
+ * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK,
* @depend_tx: memcpy depends on the result of this transaction
* @cb_fn: function to call when the memcpy completes
* @cb_param: parameter to pass to the callback routine
@@ -46,33 +46,29 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
struct dma_async_tx_descriptor *depend_tx,
dma_async_tx_callback cb_fn, void *cb_param)
{
- struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_MEMCPY);
+ struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_MEMCPY,
+ &dest, 1, &src, 1, len);
struct dma_device *device = chan ? chan->device : NULL;
- int int_en = cb_fn ? 1 : 0;
- struct dma_async_tx_descriptor *tx = device ?
- device->device_prep_dma_memcpy(chan, len,
- int_en) : NULL;
+ struct dma_async_tx_descriptor *tx = NULL;
- if (tx) { /* run the memcpy asynchronously */
- dma_addr_t addr;
- enum dma_data_direction dir;
+ if (device) {
+ dma_addr_t dma_dest, dma_src;
+ unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
- pr_debug("%s: (async) len: %zu\n", __FUNCTION__, len);
-
- dir = (flags & ASYNC_TX_ASSUME_COHERENT) ?
- DMA_NONE : DMA_FROM_DEVICE;
-
- addr = dma_map_page(device->dev, dest, dest_offset, len, dir);
- tx->tx_set_dest(addr, tx, 0);
+ dma_dest = dma_map_page(device->dev, dest, dest_offset, len,
+ DMA_FROM_DEVICE);
- dir = (flags & ASYNC_TX_ASSUME_COHERENT) ?
- DMA_NONE : DMA_TO_DEVICE;
+ dma_src = dma_map_page(device->dev, src, src_offset, len,
+ DMA_TO_DEVICE);
- addr = dma_map_page(device->dev, src, src_offset, len, dir);
- tx->tx_set_src(addr, tx, 0);
+ tx = device->device_prep_dma_memcpy(chan, dma_dest, dma_src,
+ len, dma_prep_flags);
+ }
+ if (tx) {
+ pr_debug("%s: (async) len: %zu\n", __FUNCTION__, len);
async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
- } else { /* run the memcpy synchronously */
+ } else {
void *dest_buf, *src_buf;
pr_debug("%s: (sync) len: %zu\n", __FUNCTION__, len);
diff --git a/crypto/async_tx/async_memset.c b/crypto/async_tx/async_memset.c
index 66ef6351202e..09c0e83664bc 100644
--- a/crypto/async_tx/async_memset.c
+++ b/crypto/async_tx/async_memset.c
@@ -35,7 +35,7 @@
* @val: fill value
* @offset: offset in pages to start transaction
* @len: length in bytes
- * @flags: ASYNC_TX_ASSUME_COHERENT, ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
+ * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
* @depend_tx: memset depends on the result of this transaction
* @cb_fn: function to call when the memcpy completes
* @cb_param: parameter to pass to the callback routine
@@ -46,24 +46,24 @@ async_memset(struct page *dest, int val, unsigned int offset,
struct dma_async_tx_descriptor *depend_tx,
dma_async_tx_callback cb_fn, void *cb_param)
{
- struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_MEMSET);
+ struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_MEMSET,
+ &dest, 1, NULL, 0, len);
struct dma_device *device = chan ? chan->device : NULL;
- int int_en = cb_fn ? 1 : 0;
- struct dma_async_tx_descriptor *tx = device ?
- device->device_prep_dma_memset(chan, val, len,
- int_en) : NULL;
+ struct dma_async_tx_descriptor *tx = NULL;
- if (tx) { /* run the memset asynchronously */
- dma_addr_t dma_addr;
- enum dma_data_direction dir;
+ if (device) {
+ dma_addr_t dma_dest;
+ unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
- pr_debug("%s: (async) len: %zu\n", __FUNCTION__, len);
- dir = (flags & ASYNC_TX_ASSUME_COHERENT) ?
- DMA_NONE : DMA_FROM_DEVICE;
+ dma_dest = dma_map_page(device->dev, dest, offset, len,
+ DMA_FROM_DEVICE);
- dma_addr = dma_map_page(device->dev, dest, offset, len, dir);
- tx->tx_set_dest(dma_addr, tx, 0);
+ tx = device->device_prep_dma_memset(chan, dma_dest, val, len,
+ dma_prep_flags);
+ }
+ if (tx) {
+ pr_debug("%s: (async) len: %zu\n", __FUNCTION__, len);
async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
} else { /* run the memset synchronously */
void *dest_buf;
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index bc18cbb8ea79..562882189de5 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -57,8 +57,7 @@ static struct chan_ref_percpu *channel_table[DMA_TX_TYPE_END];
*/
static spinlock_t async_tx_lock;
-static struct list_head
-async_tx_master_list = LIST_HEAD_INIT(async_tx_master_list);
+static LIST_HEAD(async_tx_master_list);
/* async_tx_issue_pending_all - start all transactions on all channels */
void async_tx_issue_pending_all(void)
@@ -362,13 +361,13 @@ static void __exit async_tx_exit(void)
}
/**
- * async_tx_find_channel - find a channel to carry out the operation or let
+ * __async_tx_find_channel - find a channel to carry out the operation or let
* the transaction execute synchronously
* @depend_tx: transaction dependency
* @tx_type: transaction type
*/
struct dma_chan *
-async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
+__async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
enum dma_transaction_type tx_type)
{
/* see if we can keep the chain on one channel */
@@ -384,7 +383,7 @@ async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
} else
return NULL;
}
-EXPORT_SYMBOL_GPL(async_tx_find_channel);
+EXPORT_SYMBOL_GPL(__async_tx_find_channel);
#else
static int __init async_tx_init(void)
{
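
The rename above pairs with the widened call sites in async_memcpy(), async_memset(), async_xor() and async_xor_zero_sum(), which now pass the destination/source page lists and the transfer length to async_tx_find_channel(). The header-side counterpart is not shown in this diff; the sketch below is a hypothetical reconstruction of the generic fallback (the CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL guard name and the static-inline form are assumptions), whose only firm requirement is that it reduce to __async_tx_find_channel(depend_tx, tx_type) so the call sites in this patch keep working.

/* Hypothetical generic fallback, assumed to live in the async_tx header:
 * an architecture override could inspect the pages and length to pick a
 * channel; the generic case ignores them.
 */
#ifndef CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL
static inline struct dma_chan *
async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
		      enum dma_transaction_type tx_type,
		      struct page **dst, int dst_count,
		      struct page **src, int src_count, size_t len)
{
	return __async_tx_find_channel(depend_tx, tx_type);
}
#endif
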
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index 2575f674dcd5..2259a4ff15cb 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -30,35 +30,51 @@
#include <linux/raid/xor.h>
#include <linux/async_tx.h>
-static void
-do_async_xor(struct dma_async_tx_descriptor *tx, struct dma_device *device,
+/* do_async_xor - dma map the pages and perform the xor with an engine.
+ * This routine is marked __always_inline so it can be compiled away
+ * when CONFIG_DMA_ENGINE=n
+ */
+static __always_inline struct dma_async_tx_descriptor *
+do_async_xor(struct dma_device *device,
struct dma_chan *chan, struct page *dest, struct page **src_list,
unsigned int offset, unsigned int src_cnt, size_t len,
enum async_tx_flags flags, struct dma_async_tx_descriptor *depend_tx,
dma_async_tx_callback cb_fn, void *cb_param)
{
- dma_addr_t dma_addr;
- enum dma_data_direction dir;
+ dma_addr_t dma_dest;
+ dma_addr_t *dma_src = (dma_addr_t *) src_list;
+ struct dma_async_tx_descriptor *tx;
int i;
+ unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
pr_debug("%s: len: %zu\n", __FUNCTION__, len);
- dir = (flags & ASYNC_TX_ASSUME_COHERENT) ?
- DMA_NONE : DMA_FROM_DEVICE;
-
- dma_addr = dma_map_page(device->dev, dest, offset, len, dir);
- tx->tx_set_dest(dma_addr, tx, 0);
-
- dir = (flags & ASYNC_TX_ASSUME_COHERENT) ?
- DMA_NONE : DMA_TO_DEVICE;
+ dma_dest = dma_map_page(device->dev, dest, offset, len,
+ DMA_FROM_DEVICE);
- for (i = 0; i < src_cnt; i++) {
- dma_addr = dma_map_page(device->dev, src_list[i],
- offset, len, dir);
- tx->tx_set_src(dma_addr, tx, i);
+ for (i = 0; i < src_cnt; i++)
+ dma_src[i] = dma_map_page(device->dev, src_list[i], offset,
+ len, DMA_TO_DEVICE);
+
+ /* Since we have clobbered the src_list we are committed
+ * to doing this asynchronously. Drivers force forward progress
+ * in case they can not provide a descriptor
+ */
+ tx = device->device_prep_dma_xor(chan, dma_dest, dma_src, src_cnt, len,
+ dma_prep_flags);
+ if (!tx) {
+ if (depend_tx)
+ dma_wait_for_async_tx(depend_tx);
+
+ while (!tx)
+ tx = device->device_prep_dma_xor(chan, dma_dest,
+ dma_src, src_cnt, len,
+ dma_prep_flags);
}
async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
+
+ return tx;
}
static void
@@ -102,7 +118,7 @@ do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
* @src_cnt: number of source pages
* @len: length in bytes
* @flags: ASYNC_TX_XOR_ZERO_DST, ASYNC_TX_XOR_DROP_DEST,
- * ASYNC_TX_ASSUME_COHERENT, ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
+ * ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
* @depend_tx: xor depends on the result of this transaction.
* @cb_fn: function to call when the xor completes
* @cb_param: parameter to pass to the callback routine
@@ -113,14 +129,16 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
struct dma_async_tx_descriptor *depend_tx,
dma_async_tx_callback cb_fn, void *cb_param)
{
- struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_XOR);
+ struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_XOR,
+ &dest, 1, src_list,
+ src_cnt, len);
struct dma_device *device = chan ? chan->device : NULL;
struct dma_async_tx_descriptor *tx = NULL;
dma_async_tx_callback _cb_fn;
void *_cb_param;
unsigned long local_flags;
int xor_src_cnt;
- int i = 0, src_off = 0, int_en;
+ int i = 0, src_off = 0;
BUG_ON(src_cnt <= 1);
@@ -140,20 +158,11 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
_cb_param = cb_param;
}
- int_en = _cb_fn ? 1 : 0;
-
- tx = device->device_prep_dma_xor(
- chan, xor_src_cnt, len, int_en);
-
- if (tx) {
- do_async_xor(tx, device, chan, dest,
- &src_list[src_off], offset, xor_src_cnt, len,
- local_flags, depend_tx, _cb_fn,
- _cb_param);
- } else /* fall through */
- goto xor_sync;
+ tx = do_async_xor(device, chan, dest,
+ &src_list[src_off], offset,
+ xor_src_cnt, len, local_flags,
+ depend_tx, _cb_fn, _cb_param);
} else { /* run the xor synchronously */
-xor_sync:
/* in the sync case the dest is an implied source
* (assumes the dest is at the src_off index)
*/
@@ -242,7 +251,7 @@ static int page_is_zero(struct page *p, unsigned int offset, size_t len)
* @src_cnt: number of source pages
* @len: length in bytes
* @result: 0 if sum == 0 else non-zero
- * @flags: ASYNC_TX_ASSUME_COHERENT, ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
+ * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
* @depend_tx: xor depends on the result of this transaction.
* @cb_fn: function to call when the xor completes
* @cb_param: parameter to pass to the callback routine
@@ -254,29 +263,36 @@ async_xor_zero_sum(struct page *dest, struct page **src_list,
struct dma_async_tx_descriptor *depend_tx,
dma_async_tx_callback cb_fn, void *cb_param)
{
- struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_ZERO_SUM);
+ struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_ZERO_SUM,
+ &dest, 1, src_list,
+ src_cnt, len);
struct dma_device *device = chan ? chan->device : NULL;
- int int_en = cb_fn ? 1 : 0;
- struct dma_async_tx_descriptor *tx = device ?
- device->device_prep_dma_zero_sum(chan, src_cnt, len, result,
- int_en) : NULL;
- int i;
+ struct dma_async_tx_descriptor *tx = NULL;
BUG_ON(src_cnt <= 1);
- if (tx) {
- dma_addr_t dma_addr;
- enum dma_data_direction dir;
+ if (device) {
+ dma_addr_t *dma_src = (dma_addr_t *) src_list;
+ unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
+ int i;
pr_debug("%s: (async) len: %zu\n", __FUNCTION__, len);
- dir = (flags & ASYNC_TX_ASSUME_COHERENT) ?
- DMA_NONE : DMA_TO_DEVICE;
-
- for (i = 0; i < src_cnt; i++) {
- dma_addr = dma_map_page(device->dev, src_list[i],
- offset, len, dir);
- tx->tx_set_src(dma_addr, tx, i);
+ for (i = 0; i < src_cnt; i++)
+ dma_src[i] = dma_map_page(device->dev, src_list[i],
+ offset, len, DMA_TO_DEVICE);
+
+ tx = device->device_prep_dma_zero_sum(chan, dma_src, src_cnt,
+ len, result,
+ dma_prep_flags);
+ if (!tx) {
+ if (depend_tx)
+ dma_wait_for_async_tx(depend_tx);
+
+ while (!tx)
+ tx = device->device_prep_dma_zero_sum(chan,
+ dma_src, src_cnt, len, result,
+ dma_prep_flags);
}
async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
@@ -311,6 +327,16 @@ EXPORT_SYMBOL_GPL(async_xor_zero_sum);
static int __init async_xor_init(void)
{
+ #ifdef CONFIG_DMA_ENGINE
+ /* To conserve stack space the input src_list (array of page pointers)
+ * is reused to hold the array of dma addresses passed to the driver.
+ * This conversion is only possible when dma_addr_t is less than the
+ * the size of a pointer. HIGHMEM64G is known to violate this
+ * assumption.
+ */
+ BUILD_BUG_ON(sizeof(dma_addr_t) > sizeof(struct page *));
+ #endif
+
return 0;
}
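
The BUILD_BUG_ON() added above guards the in-place reuse visible in do_async_xor() and async_xor_zero_sum(): the caller's array of struct page * is cast to an array of dma_addr_t, and each slot is overwritten with the DMA address mapped from the page it held. A minimal sketch of that pattern follows (the helper name map_xor_srcs_in_place() is made up for illustration); it is only safe while a dma_addr_t is no bigger than a pointer slot, which is the assumption HIGHMEM64G breaks.

#include <linux/dma-mapping.h>

/* Sketch of the src_list-reuse pattern protected by the BUILD_BUG_ON();
 * the function name and standalone form are illustrative, not kernel API.
 */
static dma_addr_t *map_xor_srcs_in_place(struct device *dev,
					 struct page **src_list,
					 unsigned int offset, size_t len,
					 int src_cnt)
{
	dma_addr_t *dma_src = (dma_addr_t *) src_list;	/* reuse the array */
	int i;

	for (i = 0; i < src_cnt; i++)
		dma_src[i] = dma_map_page(dev, src_list[i], offset, len,
					  DMA_TO_DEVICE);	/* read page, then clobber slot */

	return dma_src;
}
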
diff --git a/crypto/cbc.c b/crypto/cbc.c
index 6affff882cf8..61ac42e1e32b 100644
--- a/crypto/cbc.c
+++ b/crypto/cbc.c
@@ -224,7 +224,7 @@ static struct crypto_instance *crypto_cbc_alloc(struct rtattr **tb)
alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
CRYPTO_ALG_TYPE_MASK);
if (IS_ERR(alg))
- return ERR_PTR(PTR_ERR(alg));
+ return ERR_CAST(alg);
inst = ERR_PTR(-EINVAL);
if (!is_power_of_2(alg->cra_blocksize))
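
The same one-line conversion repeats in cryptd.c, ecb.c, hmac.c, lrw.c, pcbc.c and xcbc.c below: ERR_PTR(PTR_ERR(alg)) becomes ERR_CAST(alg), which forwards an error pointer of one type as an error pointer of another without decoding and re-encoding the errno value. A minimal illustrative sketch of the idiom, not taken from this commit (example_alloc() is a made-up name), is:

#include <linux/err.h>
#include <crypto/algapi.h>

/* Illustrative only: propagate an error pointer received as a
 * struct crypto_alg * to a caller expecting a struct crypto_instance *.
 */
static struct crypto_instance *example_alloc(struct crypto_alg *alg)
{
	if (IS_ERR(alg))
		return ERR_CAST(alg);	/* was: return ERR_PTR(PTR_ERR(alg)); */

	/* ... build and return the real instance here ... */
	return NULL;
}
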
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index 074298f2f8e3..250425263e00 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -230,7 +230,7 @@ static struct crypto_instance *cryptd_alloc_blkcipher(
alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER,
CRYPTO_ALG_TYPE_MASK);
if (IS_ERR(alg))
- return ERR_PTR(PTR_ERR(alg));
+ return ERR_CAST(alg);
inst = cryptd_alloc_instance(alg, state);
if (IS_ERR(inst))
@@ -267,7 +267,7 @@ static struct crypto_instance *cryptd_alloc(struct rtattr **tb)
algt = crypto_get_attr_type(tb);
if (IS_ERR(algt))
- return ERR_PTR(PTR_ERR(algt));
+ return ERR_CAST(algt);
switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
case CRYPTO_ALG_TYPE_BLKCIPHER:
diff --git a/crypto/ecb.c b/crypto/ecb.c
index 6310387a872c..a46838e98a71 100644
--- a/crypto/ecb.c
+++ b/crypto/ecb.c
@@ -128,7 +128,7 @@ static struct crypto_instance *crypto_ecb_alloc(struct rtattr **tb)
alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
CRYPTO_ALG_TYPE_MASK);
if (IS_ERR(alg))
- return ERR_PTR(PTR_ERR(alg));
+ return ERR_CAST(alg);
inst = crypto_alloc_instance("ecb", alg);
if (IS_ERR(inst))
diff --git a/crypto/hmac.c b/crypto/hmac.c
index a1d016a50e7d..b60c3c7aa320 100644
--- a/crypto/hmac.c
+++ b/crypto/hmac.c
@@ -213,7 +213,7 @@ static struct crypto_instance *hmac_alloc(struct rtattr **tb)
alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_HASH,
CRYPTO_ALG_TYPE_HASH_MASK);
if (IS_ERR(alg))
- return ERR_PTR(PTR_ERR(alg));
+ return ERR_CAST(alg);
inst = crypto_alloc_instance("hmac", alg);
if (IS_ERR(inst))
diff --git a/crypto/lrw.c b/crypto/lrw.c
index 621095db28b3..9d52e580d10a 100644
--- a/crypto/lrw.c
+++ b/crypto/lrw.c
@@ -241,7 +241,7 @@ static struct crypto_instance *alloc(struct rtattr **tb)
alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
CRYPTO_ALG_TYPE_MASK);
if (IS_ERR(alg))
- return ERR_PTR(PTR_ERR(alg));
+ return ERR_CAST(alg);
inst = crypto_alloc_instance("lrw", alg);
if (IS_ERR(inst))
diff --git a/crypto/pcbc.c b/crypto/pcbc.c
index fe704775f88f..d1b8bdfb5855 100644
--- a/crypto/pcbc.c
+++ b/crypto/pcbc.c
@@ -234,7 +234,7 @@ static struct crypto_instance *crypto_pcbc_alloc(struct rtattr **tb)
alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
CRYPTO_ALG_TYPE_MASK);
if (IS_ERR(alg))
- return ERR_PTR(PTR_ERR(alg));
+ return ERR_CAST(alg);
inst = crypto_alloc_instance("pcbc", alg);
if (IS_ERR(inst))
diff --git a/crypto/xcbc.c b/crypto/xcbc.c
index a82959df678c..86727403e5ab 100644
--- a/crypto/xcbc.c
+++ b/crypto/xcbc.c
@@ -301,7 +301,7 @@ static struct crypto_instance *xcbc_alloc(struct rtattr **tb)
alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
CRYPTO_ALG_TYPE_MASK);
if (IS_ERR(alg))
- return ERR_PTR(PTR_ERR(alg));
+ return ERR_CAST(alg);
switch(alg->cra_blocksize) {
case 16: