author    | Linus Torvalds <torvalds@linux-foundation.org> | 2013-09-07 23:31:18 +0200
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-09-07 23:31:18 +0200
commit    | 6be48f2940af9ea8d93c23a0dd8e322672c92efd (patch)
tree      | 1bdc85a9d3fd0c19e108ea27a29a83ef2b44f5d0 /drivers/crypto/nx/nx.c
parent    | Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net (diff)
parent    | Reinstate "crypto: crct10dif - Wrap crc_t10dif function all to use crypto transform framework" (diff)
download  | linux-6be48f2940af9ea8d93c23a0dd8e322672c92efd.tar.xz linux-6be48f2940af9ea8d93c23a0dd8e322672c92efd.zip
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto update from Herbert Xu:
"Here is the crypto update for 3.12:
- Added MODULE_SOFTDEP to allow pre-loading of modules.
- Reinstated crct10dif driver using the module softdep feature.
- Allow via rng driver to be auto-loaded.
- Split large input data when necessary in nx.
- Handle zero length messages correctly for GCM/XCBC in nx.
- Handle SHA-2 chunks bigger than block size properly in nx.
- Handle unaligned lengths in omap-aes.
- Added SHA384/SHA512 to omap-sham.
- Added OMAP5/AM43XX SHAM support.
- Added OMAP4 TRNG support.
- Misc fixes"
* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (66 commits)
Reinstate "crypto: crct10dif - Wrap crc_t10dif function all to use crypto transform framework"
hwrng: via - Add MODULE_DEVICE_TABLE
crypto: fcrypt - Fix bitoperation for compilation with clang
crypto: nx - fix SHA-2 for chunks bigger than block size
crypto: nx - fix GCM for zero length messages
crypto: nx - fix XCBC for zero length messages
crypto: nx - fix limits to sg lists for AES-CCM
crypto: nx - fix limits to sg lists for AES-XCBC
crypto: nx - fix limits to sg lists for AES-GCM
crypto: nx - fix limits to sg lists for AES-CTR
crypto: nx - fix limits to sg lists for AES-CBC
crypto: nx - fix limits to sg lists for AES-ECB
crypto: nx - add offset to nx_build_sg_lists()
padata - Register hotcpu notifier after initialization
padata - share code between CPU_ONLINE and CPU_DOWN_FAILED, same to CPU_DOWN_PREPARE and CPU_UP_CANCELED
hwrng: omap - reorder OMAP TRNG driver code
crypto: omap-sham - correct dma burst size
crypto: omap-sham - Enable Polling mode if DMA fails
crypto: tegra-aes - bitwise vs logical and
crypto: sahara - checking the wrong variable
...
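The MODULE_SOFTDEP mechanism mentioned in the pull message is what allows the reinstated crct10dif wrapper to get its helper module pre-loaded. A minimal sketch of declaring such a soft dependency follows; the module shown is hypothetical, not the actual lib/crc-t10dif.c code, though "pre:" (and its counterpart "post:") are the real prefixes the macro accepts:

```c
#include <linux/module.h>

/* Ask modprobe to load the "crct10dif" module before this one;
 * "post:" would request loading it afterwards.  Soft dependencies
 * are advisory hints recorded in .modinfo, not hard requirements. */
MODULE_SOFTDEP("pre: crct10dif");

MODULE_DESCRIPTION("Example consumer of a soft module dependency");
MODULE_LICENSE("GPL");
```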
Diffstat (limited to 'drivers/crypto/nx/nx.c')
-rw-r--r-- | drivers/crypto/nx/nx.c | 35
1 file changed, 28 insertions(+), 7 deletions(-)
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c
index bbdab6e5ccf0..5533fe31c90d 100644
--- a/drivers/crypto/nx/nx.c
+++ b/drivers/crypto/nx/nx.c
@@ -61,8 +61,7 @@ int nx_hcall_sync(struct nx_crypto_ctx *nx_ctx,
 
 	do {
 		rc = vio_h_cop_sync(viodev, op);
-	} while ((rc == -EBUSY && !may_sleep && retries--) ||
-		 (rc == -EBUSY && may_sleep && cond_resched()));
+	} while (rc == -EBUSY && !may_sleep && retries--);
 
 	if (rc) {
 		dev_dbg(&viodev->dev, "vio_h_cop_sync failed: rc: %d "
@@ -114,13 +113,29 @@ struct nx_sg *nx_build_sg_list(struct nx_sg *sg_head,
 	 * have been described (or @sgmax elements have been written), the
 	 * loop ends. min_t is used to ensure @end_addr falls on the same page
 	 * as sg_addr, if not, we need to create another nx_sg element for the
-	 * data on the next page */
+	 * data on the next page.
+	 *
+	 * Also when using vmalloc'ed data, every time that a system page
+	 * boundary is crossed the physical address needs to be re-calculated.
+	 */
 	for (sg = sg_head; sg_len < len; sg++) {
+		u64 next_page;
+
 		sg->addr = sg_addr;
-		sg_addr = min_t(u64, NX_PAGE_NUM(sg_addr + NX_PAGE_SIZE), end_addr);
-		sg->len = sg_addr - sg->addr;
+		sg_addr = min_t(u64, NX_PAGE_NUM(sg_addr + NX_PAGE_SIZE),
+				end_addr);
+
+		next_page = (sg->addr & PAGE_MASK) + PAGE_SIZE;
+		sg->len = min_t(u64, sg_addr, next_page) - sg->addr;
 		sg_len += sg->len;
 
+		if (sg_addr >= next_page &&
+		    is_vmalloc_addr(start_addr + sg_len)) {
+			sg_addr = page_to_phys(vmalloc_to_page(
+						start_addr + sg_len));
+			end_addr = sg_addr + len - sg_len;
+		}
+
 		if ((sg - sg_head) == sgmax) {
 			pr_err("nx: scatter/gather list overflow, pid: %d\n",
 			       current->pid);
@@ -196,6 +211,8 @@ struct nx_sg *nx_walk_and_build(struct nx_sg *nx_dst,
  * @dst: destination scatterlist
  * @src: source scatterlist
  * @nbytes: length of data described in the scatterlists
+ * @offset: number of bytes to fast-forward past at the beginning of
+ *          scatterlists.
  * @iv: destination for the iv data, if the algorithm requires it
  *
  * This is common code shared by all the AES algorithms. It uses the block
@@ -207,6 +224,7 @@ int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx,
 		      struct scatterlist *dst,
 		      struct scatterlist *src,
 		      unsigned int nbytes,
+		      unsigned int offset,
 		      u8 *iv)
 {
 	struct nx_sg *nx_insg = nx_ctx->in_sg;
@@ -215,8 +233,10 @@ int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx,
 	if (iv)
 		memcpy(iv, desc->info, AES_BLOCK_SIZE);
 
-	nx_insg = nx_walk_and_build(nx_insg, nx_ctx->ap->sglen, src, 0, nbytes);
-	nx_outsg = nx_walk_and_build(nx_outsg, nx_ctx->ap->sglen, dst, 0, nbytes);
+	nx_insg = nx_walk_and_build(nx_insg, nx_ctx->ap->sglen, src,
+				    offset, nbytes);
+	nx_outsg = nx_walk_and_build(nx_outsg, nx_ctx->ap->sglen, dst,
+				    offset, nbytes);
 
 	/* these lengths should be negative, which will indicate to phyp that
 	 * the input and output parameters are scatterlists, not linear
@@ -235,6 +255,7 @@ int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx,
  */
 void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function)
 {
+	spin_lock_init(&nx_ctx->lock);
 	memset(nx_ctx->kmem, 0, nx_ctx->kmem_len);
 	nx_ctx->csbcpb->csb.valid |= NX_CSB_VALID_BIT;
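The nx_build_sg_list() hunk above hinges on the fact that a vmalloc'ed buffer is only virtually contiguous: the physical address has to be looked up again with vmalloc_to_page() every time a PAGE_SIZE boundary is crossed. A hedged sketch of that per-page lookup, outside the driver; walk_vmalloc_pages and buf are illustrative names, not nx code:

```c
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

static void walk_vmalloc_pages(u8 *buf, unsigned int len)
{
	unsigned int off = 0;

	while (off < len) {
		/* Re-derive the physical address on every page: the
		 * pages backing a vmalloc area need not be adjacent. */
		phys_addr_t phys = page_to_phys(vmalloc_to_page(buf + off)) +
				   offset_in_page(buf + off);
		/* Stop each chunk at the next page boundary (or at len). */
		unsigned int chunk = min_t(unsigned int, len - off,
					   PAGE_SIZE - offset_in_page(buf + off));

		/* hand [phys, phys + chunk) to the hardware here */
		(void)phys;

		off += chunk;
	}
}
```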
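The new @offset parameter of nx_build_sg_lists() is what lets the per-algorithm drivers split requests larger than the coprocessor limit into multiple passes, fast-forwarding past bytes already handled. A sketch of such a caller loop, assuming the driver's nx_ctx->ap->databytelen limit and an AES-CBC-style IV field; the variable names (processed, to_process) are illustrative rather than lifted from any one nx driver:

```c
unsigned int processed = 0, to_process;
int rc = 0;

do {
	/* Clamp this pass to the coprocessor's per-operation limit. */
	to_process = min_t(unsigned int, nbytes - processed,
			   nx_ctx->ap->databytelen);

	/* Skip the @processed bytes covered by earlier passes. */
	rc = nx_build_sg_lists(nx_ctx, desc, dst, src, to_process,
			       processed, csbcpb->cpb.aes_cbc.iv);
	if (rc)
		break;

	/* ... submit the NX operation for this chunk here ... */

	processed += to_process;
} while (processed < nbytes);
```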