diff --git a/Documentation/DocBook/Makefile b/Documentation/DocBook/Makefile
index bec06659e0eb84b4327ee5e4b7f10450d3499b7d..9c7d92d03f62eb7a5a9fb3b1724e271201d41747 100644
--- a/Documentation/DocBook/Makefile
+++ b/Documentation/DocBook/Makefile
@@ -15,7 +15,7 @@ DOCBOOKS := z8530book.xml device-drivers.xml \
 	    80211.xml debugobjects.xml sh.xml regulator.xml \
 	    alsa-driver-api.xml writing-an-alsa-driver.xml \
 	    tracepoint.xml drm.xml media_api.xml w1.xml \
-	    writing_musb_glue_layer.xml
+	    writing_musb_glue_layer.xml crypto-API.xml
 
 include Documentation/DocBook/media/Makefile
 
diff --git a/Documentation/DocBook/crypto-API.tmpl b/Documentation/DocBook/crypto-API.tmpl
new file mode 100644
index 0000000000000000000000000000000000000000..c763d30f4893f89452af5be3fb66b4a515dca41e
--- /dev/null
+++ b/Documentation/DocBook/crypto-API.tmpl
@@ -0,0 +1,1253 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
+	"http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
+
+<book id="KernelCryptoAPI">
+ <bookinfo>
+  <title>Linux Kernel Crypto API</title>
+
+  <authorgroup>
+   <author>
+    <firstname>Stephan</firstname>
+    <surname>Mueller</surname>
+    <affiliation>
+     <address>
+      <email>smueller@chronox.de</email>
+     </address>
+    </affiliation>
+   </author>
+   <author>
+    <firstname>Marek</firstname>
+    <surname>Vasut</surname>
+    <affiliation>
+     <address>
+      <email>marek@denx.de</email>
+     </address>
+    </affiliation>
+   </author>
+  </authorgroup>
+
+  <copyright>
+   <year>2014</year>
+   <holder>Stephan Mueller</holder>
+  </copyright>
+
+
+  <legalnotice>
+   <para>
+     This documentation is free software; you can redistribute
+     it and/or modify it under the terms of the GNU General Public
+     License as published by the Free Software Foundation; either
+     version 2 of the License, or (at your option) any later
+     version.
+   </para>
+
+   <para>
+     This program is distributed in the hope that it will be
+     useful, but WITHOUT ANY WARRANTY; without even the implied
+     warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+     See the GNU General Public License for more details.
+   </para>
+
+   <para>
+     You should have received a copy of the GNU General Public
+     License along with this program; if not, write to the Free
+     Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+     MA 02111-1307 USA
+   </para>
+
+   <para>
+     For more details see the file COPYING in the source
+     distribution of Linux.
+   </para>
+  </legalnotice>
+ </bookinfo>
+
+ <toc></toc>
+
+ <chapter id="Intro">
+  <title>Kernel Crypto API Interface Specification</title>
+
+   <sect1><title>Introduction</title>
+
+    <para>
+     The kernel crypto API offers a rich set of cryptographic ciphers as
+     well as other data transformation mechanisms and methods to invoke
+     these. This document contains a description of the API and provides
+     example code.
+    </para>
+
+    <para>
+     To understand and properly use the kernel crypto API a brief
+     explanation of its structure is given. Based on the architecture,
+     the API can be separated into different components. Following the
+     architecture specification, hints to developers of ciphers are
+     provided. Pointers to the API function call documentation are
+     given at the end.
+    </para>
+
+    <para>
+     The kernel crypto API refers to all algorithms as "transformations".
+     Therefore, a cipher handle variable usually has the name "tfm".
+     Besides cryptographic operations, the kernel crypto API also knows
+     compression transformations and handles them the same way as ciphers.
+    </para>
+
+    <para>
+     The kernel crypto API serves the following entity types:
+
+     <itemizedlist>
+      <listitem>
+       <para>consumers requesting cryptographic services</para>
+      </listitem>
+      <listitem>
+      <para>data transformation implementations (typically ciphers)
+       that can be called by consumers using the kernel crypto
+       API</para>
+      </listitem>
+     </itemizedlist>
+    </para>
+
+    <para>
+     This specification is intended for consumers of the kernel crypto
+     API as well as for developers implementing ciphers. This API
+     specification, however, does not discuss all API calls available
+     to data transformation implementations (i.e. implementations of
+     ciphers and other transformations (such as CRC or even compression
+     algorithms) that can register with the kernel crypto API).
+    </para>
+
+    <para>
+     Note: The terms "transformation" and "cipher algorithm" are used
+     interchangeably.
+    </para>
+   </sect1>
+
+   <sect1><title>Terminology</title>
+    <para>
+     The transformation implementation is the actual code or interface
+     to hardware which implements a certain transformation with precisely
+     defined behavior.
+    </para>
+
+    <para>
+     The transformation object (TFM) is an instance of a transformation
+     implementation. There can be multiple transformation objects
+     associated with a single transformation implementation. Each of
+     those transformation objects is held by a crypto API consumer or
+     another transformation. A transformation object is allocated when a
+     crypto API consumer requests a transformation implementation.
+     The consumer is then provided with a structure, which contains
+     a transformation object (TFM).
+    </para>
+
+    <para>
+     The structure that contains transformation objects may also be
+     referred to as a "cipher handle". Such a cipher handle is always
+     subject to the following phases that are reflected in the API calls
+     applicable to such a cipher handle:
+    </para>
+
+    <orderedlist>
+     <listitem>
+      <para>Initialization of a cipher handle.</para>
+     </listitem>
+     <listitem>
+      <para>Execution of all intended cipher operations applicable
+      for the handle where the cipher handle must be furnished to
+      every API call.</para>
+     </listitem>
+     <listitem>
+      <para>Destruction of a cipher handle.</para>
+     </listitem>
+    </orderedlist>
+
+    <para>
+     When using the initialization API calls, a cipher handle is
+     created and returned to the consumer. Therefore, please refer
+     to the initialization API calls to learn which data structure
+     type a consumer is expected to receive and subsequently to use.
+     The initialization API calls all follow the naming convention
+     crypto_alloc_*.
+    </para>
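+
+    <para>
+     As an illustration, the following is a minimal sketch of the three
+     phases for a single block cipher handle. It assumes a hypothetical
+     caller with key and data buffers already prepared; error handling
+     is reduced to the allocation check:
+    </para>
+
+    <programlisting>
+#include &lt;linux/err.h&gt;
+#include &lt;linux/crypto.h&gt;
+
+static void cipher_handle_sketch(const u8 *key, unsigned int keylen,
+				 const u8 *in, u8 *out)
+{
+	/* 1. Initialization: allocate the cipher handle (tfm) */
+	struct crypto_cipher *tfm = crypto_alloc_cipher("aes", 0, 0);
+
+	if (IS_ERR(tfm))
+		return;
+
+	/* 2. Cipher operations: the handle is passed to every API call */
+	crypto_cipher_setkey(tfm, key, keylen);
+	crypto_cipher_encrypt_one(tfm, out, in);
+
+	/* 3. Destruction of the cipher handle */
+	crypto_free_cipher(tfm);
+}
+    </programlisting>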
+
+    <para>
+     The transformation context is private data associated with
+     the transformation object.
+    </para>
+   </sect1>
+  </chapter>
+
+  <chapter id="Architecture"><title>Kernel Crypto API Architecture</title>
+   <sect1><title>Cipher algorithm types</title>
+    <para>
+     The kernel crypto API provides different API calls for the
+     following cipher types:
+
+     <itemizedlist>
+      <listitem><para>Symmetric ciphers</para></listitem>
+      <listitem><para>AEAD ciphers</para></listitem>
+      <listitem><para>Message digest, including keyed message digest</para></listitem>
+      <listitem><para>Random number generation</para></listitem>
+      <listitem><para>User space interface</para></listitem>
+     </itemizedlist>
+    </para>
+   </sect1>
+
+   <sect1><title>Ciphers And Templates</title>
+    <para>
+     The kernel crypto API provides implementations of single block
+     ciphers and message digests. In addition, the kernel crypto API
+     provides numerous "templates" that can be used in conjunction
+     with the single block ciphers and message digests. Templates
+     include all block chaining modes, the HMAC mechanism, etc.
+    </para>
+
+    <para>
+     Single block ciphers and message digests can either be directly
+     used by a caller or invoked together with a template to form
+     multi-block ciphers or keyed message digests.
+    </para>
+
+    <para>
+     A single block cipher may even be called with multiple templates.
+     However, templates cannot be used without a single block cipher.
+    </para>
+
+    <para>
+     See /proc/crypto and search for "name". For example:
+
+     <itemizedlist>
+      <listitem><para>aes</para></listitem>
+      <listitem><para>ecb(aes)</para></listitem>
+      <listitem><para>cmac(aes)</para></listitem>
+      <listitem><para>ccm(aes)</para></listitem>
+      <listitem><para>rfc4106(gcm(aes))</para></listitem>
+      <listitem><para>sha1</para></listitem>
+      <listitem><para>hmac(sha1)</para></listitem>
+      <listitem><para>authenc(hmac(sha1),cbc(aes))</para></listitem>
+     </itemizedlist>
+    </para>
+
+    <para>
+     In these examples, "aes" and "sha1" are the ciphers and all
+     others are the templates.
+    </para>
+   </sect1>
+
+   <sect1><title>Synchronous And Asynchronous Operation</title>
+    <para>
+     The kernel crypto API provides synchronous and asynchronous
+     API operations.
+    </para>
+
+    <para>
+     When using the synchronous API operation, the caller invokes
+     a cipher operation which is performed synchronously by the
+     kernel crypto API. That means, the caller waits until the
+     cipher operation completes. Therefore, the kernel crypto API
+     calls work like regular function calls. For synchronous
+     operation, the set of API calls is small and conceptually
+     similar to any other crypto library.
+    </para>
+
+    <para>
+     Asynchronous operation is provided by the kernel crypto API
+     which implies that the invocation of a cipher operation will
+     complete almost instantly. That invocation triggers the
+     cipher operation but it does not signal its completion. Before
+     invoking a cipher operation, the caller must provide a callback
+     function the kernel crypto API can invoke to signal the
+     completion of the cipher operation. Furthermore, the caller
+     must ensure it can handle such asynchronous events by applying
+     appropriate locking around its data. The kernel crypto API
+     does not perform any special serialization operation to protect
+     the caller's data integrity.
+    </para>
+   </sect1>
+
+   <sect1><title>Crypto API Cipher References And Priority</title>
+    <para>
+     A cipher is referenced by the caller with a string. That string
+     has the following semantics:
+
+     <programlisting>
+	template(single block cipher)
+     </programlisting>
+
+     where "template" and "single block cipher" are the aforementioned
+     template and single block cipher, respectively. If applicable,
+     additional templates may enclose other templates, such as
+
+      <programlisting>
+	template1(template2(single block cipher))
+      </programlisting>
+    </para>
+
+    <para>
+     The kernel crypto API may provide multiple implementations of a
+     template or a single block cipher. For example, AES on newer
+     Intel hardware has the following implementations: AES-NI,
+     assembler implementation, or straight C. Now, when using the
+     string "aes" with the kernel crypto API, which cipher
+     implementation is used? The answer to that question is the
+     priority number assigned to each cipher implementation by the
+     kernel crypto API. When a caller uses the string to refer to a
+     cipher during initialization of a cipher handle, the kernel
+     crypto API looks up all registered implementations with that
+     name and selects the implementation with the highest priority.
+    </para>
+
+    <para>
+     Now, a caller may have the need to refer to a specific cipher
+     implementation and thus does not want to rely on the
+     priority-based selection. To accommodate this scenario, the
+     kernel crypto API allows the cipher implementation to register
+     a unique name in addition to common names. When using that
+     unique name, a caller is therefore always sure to refer to
+     the intended cipher implementation.
+    </para>
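+
+    <para>
+     As a brief sketch of the difference, the following allocates the
+     same cipher once by its generic name and once by a unique driver
+     name; the driver name shown is only an example and depends on the
+     implementations registered on the system (see /proc/crypto):
+    </para>
+
+    <programlisting>
+/* generic name: the implementation with the highest priority is used */
+struct crypto_cipher *tfm = crypto_alloc_cipher("aes", 0, 0);
+
+/* unique driver name: exactly this implementation is requested */
+struct crypto_cipher *tfm2 = crypto_alloc_cipher("aes-aesni", 0, 0);
+    </programlisting>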
+
+    <para>
+     The list of available ciphers is given in /proc/crypto. However,
+     that list does not specify all possible permutations of
+     templates and ciphers. Each block listed in /proc/crypto may
+     contain the following information -- if one of the components
+     listed below is not applicable to a cipher, it is not
+     displayed:
+    </para>
+
+    <itemizedlist>
+     <listitem>
+      <para>name: the generic name of the cipher that is subject
+       to the priority-based selection -- this name can be used by
+       the cipher allocation API calls (all names listed above are
+       examples for such generic names)</para>
+     </listitem>
+     <listitem>
+      <para>driver: the unique name of the cipher -- this name can
+       be used by the cipher allocation API calls</para>
+     </listitem>
+     <listitem>
+      <para>module: the kernel module providing the cipher
+       implementation (or "kernel" for statically linked ciphers)</para>
+     </listitem>
+     <listitem>
+      <para>priority: the priority value of the cipher implementation</para>
+     </listitem>
+     <listitem>
+      <para>refcnt: the reference count of the respective cipher
+       (i.e. the number of current consumers of this cipher)</para>
+     </listitem>
+     <listitem>
+      <para>selftest: indication of whether the self test for the
+       cipher passed</para>
+     </listitem>
+     <listitem>
+      <para>type:
+       <itemizedlist>
+        <listitem>
+         <para>blkcipher for synchronous block ciphers</para>
+        </listitem>
+        <listitem>
+         <para>ablkcipher for asynchronous block ciphers</para>
+        </listitem>
+        <listitem>
+         <para>cipher for single block ciphers that may be used with
+          an additional template</para>
+        </listitem>
+        <listitem>
+         <para>shash for synchronous message digest</para>
+        </listitem>
+        <listitem>
+         <para>ahash for asynchronous message digest</para>
+        </listitem>
+        <listitem>
+         <para>aead for AEAD cipher type</para>
+        </listitem>
+        <listitem>
+         <para>compression for compression type transformations</para>
+        </listitem>
+        <listitem>
+         <para>rng for random number generator</para>
+        </listitem>
+        <listitem>
+         <para>givcipher for cipher with associated IV generator
+          (see the geniv entry below for the specification of the
+          IV generator type used by the cipher implementation)</para>
+        </listitem>
+       </itemizedlist>
+      </para>
+     </listitem>
+     <listitem>
+      <para>blocksize: blocksize of cipher in bytes</para>
+     </listitem>
+     <listitem>
+      <para>keysize: key size in bytes</para>
+     </listitem>
+     <listitem>
+      <para>ivsize: IV size in bytes</para>
+     </listitem>
+     <listitem>
+      <para>seedsize: required size of seed data for random number
+       generator</para>
+     </listitem>
+     <listitem>
+      <para>digestsize: output size of the message digest</para>
+     </listitem>
+     <listitem>
+      <para>geniv: IV generation type:
+       <itemizedlist>
+        <listitem>
+         <para>eseqiv for encrypted sequence number based IV
+          generation</para>
+        </listitem>
+        <listitem>
+         <para>seqiv for sequence number based IV generation</para>
+        </listitem>
+        <listitem>
+         <para>chainiv for chained IV generation</para>
+        </listitem>
+        <listitem>
+         <para>&lt;builtin&gt; is a marker that the cipher implements
+          IV generation and handling as it is specific to the given
+          cipher</para>
+        </listitem>
+       </itemizedlist>
+      </para>
+     </listitem>
+    </itemizedlist>
+   </sect1>
+
+   <sect1><title>Key Sizes</title>
+    <para>
+     When allocating a cipher handle, the caller only specifies the
+     cipher type. Symmetric ciphers, however, typically support
+     multiple key sizes (e.g. AES-128 vs. AES-192 vs. AES-256).
+     These key sizes are determined by the length of the provided
+     key. Thus, the kernel crypto API does not provide a separate
+     way to select the particular symmetric cipher key size.
+    </para>
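+
+    <para>
+     For example, assuming an already allocated AES cipher handle tfm
+     and a key buffer, the key length passed to the setkey call selects
+     the AES variant -- a minimal sketch:
+    </para>
+
+    <programlisting>
+/* 16 bytes of key material select AES-128 ... */
+crypto_cipher_setkey(tfm, key, 16);
+
+/* ... whereas 32 bytes of key material select AES-256 */
+crypto_cipher_setkey(tfm, key, 32);
+    </programlisting>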
+   </sect1>
+
+   <sect1><title>Cipher Allocation Type And Masks</title>
+    <para>
+     The different cipher handle allocation functions allow the
+     specification of a type and mask flag. Both parameters have
+     the following meaning (and are therefore not covered in the
+     subsequent sections).
+    </para>
+
+    <para>
+     The type flag specifies the type of the cipher algorithm.
+     The caller usually provides a 0 when the caller wants the
+     default handling. Otherwise, the caller may provide the
+     following selections which match the aforementioned
+     cipher types:
+    </para>
+
+    <itemizedlist>
+     <listitem>
+      <para>CRYPTO_ALG_TYPE_CIPHER Single block cipher</para>
+     </listitem>
+     <listitem>
+      <para>CRYPTO_ALG_TYPE_COMPRESS Compression</para>
+     </listitem>
+     <listitem>
+     <para>CRYPTO_ALG_TYPE_AEAD Authenticated Encryption with
+      Associated Data (MAC)</para>
+     </listitem>
+     <listitem>
+      <para>CRYPTO_ALG_TYPE_BLKCIPHER Synchronous multi-block cipher</para>
+     </listitem>
+     <listitem>
+      <para>CRYPTO_ALG_TYPE_ABLKCIPHER Asynchronous multi-block cipher</para>
+     </listitem>
+     <listitem>
+      <para>CRYPTO_ALG_TYPE_GIVCIPHER Asynchronous multi-block
+       cipher packed together with an IV generator (see geniv field
+       in the /proc/crypto listing for the known IV generators)</para>
+     </listitem>
+     <listitem>
+      <para>CRYPTO_ALG_TYPE_DIGEST Raw message digest</para>
+     </listitem>
+     <listitem>
+      <para>CRYPTO_ALG_TYPE_HASH Alias for CRYPTO_ALG_TYPE_DIGEST</para>
+     </listitem>
+     <listitem>
+      <para>CRYPTO_ALG_TYPE_SHASH Synchronous multi-block hash</para>
+     </listitem>
+     <listitem>
+      <para>CRYPTO_ALG_TYPE_AHASH Asynchronous multi-block hash</para>
+     </listitem>
+     <listitem>
+      <para>CRYPTO_ALG_TYPE_RNG Random Number Generation</para>
+     </listitem>
+     <listitem>
+      <para>CRYPTO_ALG_TYPE_PCOMPRESS Enhanced version of
+       CRYPTO_ALG_TYPE_COMPRESS allowing for segmented compression /
+       decompression instead of performing the operation on one
+       segment only. CRYPTO_ALG_TYPE_PCOMPRESS is intended to replace
+       CRYPTO_ALG_TYPE_COMPRESS once existing consumers are converted.</para>
+     </listitem>
+    </itemizedlist>
+
+    <para>
+     The mask flag restricts the type of cipher. The only allowed
+     flag is CRYPTO_ALG_ASYNC to restrict the cipher lookup function
+     to asynchronous ciphers. Usually, a caller provides a 0 for the
+     mask flag.
+    </para>
+
+    <para>
+     When the caller provides a mask and type specification, the
+     caller limits the search the kernel crypto API performs for a
+     suitable implementation of the given cipher name. That means,
+     even if an implementation with the requested cipher name exists,
+     the kernel crypto API may not select it because of the specified
+     type and mask fields.
+    </para>
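+
+    <para>
+     As a minimal sketch, the common case simply passes 0 for both
+     parameters and lets the kernel crypto API apply its default
+     handling:
+    </para>
+
+    <programlisting>
+/* type and mask are 0: any implementation registered under "cbc(aes)"
+ * may be selected based on its priority; the mask may be set to
+ * CRYPTO_ALG_ASYNC as described above to restrict the lookup */
+struct crypto_ablkcipher *tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
+    </programlisting>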
+   </sect1>
+  </chapter>
+
+  <chapter id="Development"><title>Developing Cipher Algorithms</title>
+   <sect1><title>Registering And Unregistering Transformation</title>
+    <para>
+     There are three distinct types of registration functions in
+     the Crypto API. One is used to register a generic cryptographic
+     transformation, while the other two are specific to HASH
+     transformations and COMPRESSion. We will discuss the latter
+     two in a separate chapter; here we will only look at the
+     generic ones.
+    </para>
+
+    <para>
+     Before discussing the register functions, the data structure
+     to be filled for each registration, struct crypto_alg, must be
+     considered -- see below for a description of this data structure.
+    </para>
+
+    <para>
+     The generic registration functions can be found in
+     include/linux/crypto.h and their definition can be seen below.
+     The former function registers a single transformation, while
+     the latter works on an array of transformation descriptions.
+     The latter is useful when registering transformations in bulk.
+    </para>
+
+    <programlisting>
+   int crypto_register_alg(struct crypto_alg *alg);
+   int crypto_register_algs(struct crypto_alg *algs, int count);
+    </programlisting>
+
+    <para>
+     The counterparts to those functions are listed below.
+    </para>
+
+    <programlisting>
+   int crypto_unregister_alg(struct crypto_alg *alg);
+   int crypto_unregister_algs(struct crypto_alg *algs, int count);
+    </programlisting>
+
+    <para>
+     Notice that both registration and unregistration functions
+     do return a value, so make sure to handle errors. A return
+     code of zero implies success. Any return code &lt; 0 implies
+     an error.
+    </para>
+
+    <para>
+     The bulk registration / unregistration functions operate on
+     an array of count entries of struct crypto_alg. These
+     functions simply loop over that array and register /
+     unregister each individual algorithm. If an error occurs,
+     the loop is terminated at the offending algorithm definition.
+     That means, the algorithms prior to the offending algorithm
+     are successfully registered. Note, the caller has no way of
+     knowing which cipher implementations have successfully
+     registered. If this is important to know, the caller should
+     loop through the different implementations using the single
+     instance *_alg functions for each individual implementation.
+    </para>
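+
+    <para>
+     As an illustration of the last point, the following is a minimal
+     sketch of such a loop; my_algs and my_nalgs are hypothetical and
+     assumed to be defined by the driver:
+    </para>
+
+    <programlisting>
+static int my_register_algs(void)
+{
+	int i, ret;
+
+	for (i = 0; i &lt; my_nalgs; i++) {
+		ret = crypto_register_alg(&amp;my_algs[i]);
+		if (ret) {
+			/* roll back what has been registered so far */
+			while (--i &gt;= 0)
+				crypto_unregister_alg(&amp;my_algs[i]);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+    </programlisting>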
+   </sect1>
+
+   <sect1><title>Single-Block Symmetric Ciphers [CIPHER]</title>
+    <para>
+     Example of transformations: aes, arc4, ...
+    </para>
+
+    <para>
+     This section describes the simplest of all transformation
+     implementations, that being the CIPHER type used for symmetric
+     ciphers. The CIPHER type is used for transformations which
+     operate on exactly one block at a time and there are no
+     dependencies between blocks at all.
+    </para>
+
+    <sect2><title>Registration specifics</title>
+     <para>
+      The registration of a [CIPHER] algorithm is specific in that the
+      struct crypto_alg field .cra_type is left empty. The .cra_u.cipher
+      member has to be filled in with the proper callbacks to implement
+      this transformation.
+     </para>
+
+     <para>
+      See struct cipher_alg below.
+     </para>
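+
+     <para>
+      As a minimal sketch of such a definition, all names and sizes below
+      are placeholders and the callbacks are assumed to be implemented by
+      the driver:
+     </para>
+
+     <programlisting>
+static struct crypto_alg my_cipher_alg = {
+	.cra_name		= "mycipher",
+	.cra_driver_name	= "mycipher-generic",
+	.cra_priority		= 100,
+	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
+	.cra_blocksize		= 16,
+	.cra_ctxsize		= sizeof(struct my_cipher_ctx),
+	.cra_module		= THIS_MODULE,
+	.cra_u			= {
+		.cipher = {
+			.cia_min_keysize	= 16,
+			.cia_max_keysize	= 32,
+			.cia_setkey		= my_cipher_setkey,
+			.cia_encrypt		= my_cipher_encrypt,
+			.cia_decrypt		= my_cipher_decrypt,
+		}
+	}
+};
+     </programlisting>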
+    </sect2>
+
+    <sect2><title>Cipher Definition With struct cipher_alg</title>
+     <para>
+      Struct cipher_alg defines a single block cipher.
+     </para>
+
+     <para>
+      Here are schematics of how these functions are called when
+      operated from other parts of the kernel. Note that the
+      .cia_setkey() call might happen before or after any of these
+      schematics, but must not happen while any of them are
+      in flight.
+     </para>
+
+     <para>
+      <programlisting>
+         KEY ---.    PLAINTEXT ---.
+                v                 v
+          .cia_setkey() -&gt; .cia_encrypt()
+                                  |
+                                  '-----&gt; CIPHERTEXT
+      </programlisting>
+     </para>
+
+     <para>
+      Please note that a pattern where .cia_setkey() is called
+      multiple times is also valid:
+     </para>
+
+     <para>
+      <programlisting>
+
+  KEY1 --.    PLAINTEXT1 --.         KEY2 --.    PLAINTEXT2 --.
+         v                 v                v                 v
+   .cia_setkey() -&gt; .cia_encrypt() -&gt; .cia_setkey() -&gt; .cia_encrypt()
+                           |                                  |
+                           '---&gt; CIPHERTEXT1                  '---&gt; CIPHERTEXT2
+      </programlisting>
+     </para>
+
+    </sect2>
+   </sect1>
+
+   <sect1><title>Multi-Block Ciphers [BLKCIPHER] [ABLKCIPHER]</title>
+    <para>
+     Example of transformations: cbc(aes), ecb(arc4), ...
+    </para>
+
+    <para>
+     This section describes the multi-block cipher transformation
+     implementations for both synchronous [BLKCIPHER] and
+     asynchronous [ABLKCIPHER] case. The multi-block ciphers are
+     used for transformations which operate on scatterlists of
+     data supplied to the transformation functions. They output
+     the result into a scatterlist of data as well.
+    </para>
+
+    <sect2><title>Registration Specifics</title>
+
+     <para>
+      The registration of [BLKCIPHER] or [ABLKCIPHER] algorithms
+      is one of the most standard procedures throughout the crypto API.
+     </para>
+
+     <para>
+      Note, if a cipher implementation requires a proper alignment
+      of data, the caller should use
+      crypto_blkcipher_alignmask() or crypto_ablkcipher_alignmask(),
+      respectively, to obtain the memory alignment mask. The kernel
+      crypto API is able to process requests that are unaligned.
+      This implies, however, additional overhead as the kernel
+      crypto API needs to realign the data, which may require
+      copying it.
+     </para>
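+
+     <para>
+      A minimal sketch of how a caller could provide suitably aligned
+      memory for an allocated synchronous block cipher handle tfm; the
+      buffer size bufsize is only an example:
+     </para>
+
+     <programlisting>
+unsigned int alignmask = crypto_blkcipher_alignmask(tfm);
+char *mem, *aligned;
+
+/* over-allocate and round the pointer up to the required boundary */
+mem = kmalloc(bufsize + alignmask, GFP_KERNEL);
+if (!mem)
+	return -ENOMEM;
+aligned = PTR_ALIGN(mem, alignmask + 1);
+     </programlisting>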
+    </sect2>
+
+    <sect2><title>Cipher Definition With struct blkcipher_alg and ablkcipher_alg</title>
+     <para>
+      Struct blkcipher_alg defines a synchronous block cipher whereas
+      struct ablkcipher_alg defines an asynchronous block cipher.
+     </para>
+
+     <para>
+      Please refer to the single block cipher description for schematics
+      of the block cipher usage. The usage patterns are exactly the same
+      for [ABLKCIPHER] and [BLKCIPHER] as they are for plain [CIPHER].
+     </para>
+    </sect2>
+
+    <sect2><title>Specifics Of Asynchronous Multi-Block Cipher</title>
+     <para>
+      There are a couple of specifics to the [ABLKCIPHER] interface.
+     </para>
+
+     <para>
+      First of all, some of the drivers will want to use the
+      Generic ScatterWalk in case the hardware needs to be fed
+      separate chunks of the scatterlist which contains the
+      plaintext and will contain the ciphertext. Please refer
+      to the ScatterWalk interface offered by the Linux kernel
+      scatter / gather list implementation.
+     </para>
+    </sect2>
+   </sect1>
+
+   <sect1><title>Hashing [HASH]</title>
+
+    <para>
+     Example of transformations: crc32, md5, sha1, sha256,...
+    </para>
+
+    <sect2><title>Registering And Unregistering The Transformation</title>
+
+     <para>
+      There are multiple ways to register a HASH transformation,
+      depending on whether the transformation is synchronous [SHASH]
+      or asynchronous [AHASH] and the number of HASH transformations
+      we are registering. You can find the prototypes defined in
+      include/crypto/internal/hash.h:
+     </para>
+
+     <programlisting>
+   int crypto_register_ahash(struct ahash_alg *alg);
+
+   int crypto_register_shash(struct shash_alg *alg);
+   int crypto_register_shashes(struct shash_alg *algs, int count);
+     </programlisting>
+
+     <para>
+      The respective counterparts for unregistering the HASH
+      transformation are as follows:
+     </para>
+
+     <programlisting>
+   int crypto_unregister_ahash(struct ahash_alg *alg);
+
+   int crypto_unregister_shash(struct shash_alg *alg);
+   int crypto_unregister_shashes(struct shash_alg *algs, int count);
+     </programlisting>
+    </sect2>
+
+    <sect2><title>Cipher Definition With struct shash_alg and ahash_alg</title>
+     <para>
+      Here are schematics of how these functions are called when
+      operated from other parts of the kernel. Note that the .setkey()
+      call might happen before or after any of these schematics,
+      but must not happen while any of them are in flight. Please note
+      that calling .init() followed immediately by .final() is also a
+      perfectly valid transformation.
+     </para>
+
+     <programlisting>
+   I)   DATA -----------.
+                        v
+         .init() -&gt; .update() -&gt; .final()      ! .update() might not be called
+                     ^    |         |            at all in this scenario.
+                     '----'         '---&gt; HASH
+
+   II)  DATA -----------.-----------.
+                        v           v
+         .init() -&gt; .update() -&gt; .finup()      ! .update() may not be called
+                     ^    |         |            at all in this scenario.
+                     '----'         '---&gt; HASH
+
+   III) DATA -----------.
+                        v
+                    .digest()                  ! The entire process is handled
+                        |                        by the .digest() call.
+                        '---------------&gt; HASH
+     </programlisting>
+
+     <para>
+      Here is a schematic of how the .export()/.import() functions are
+      called when used from another part of the kernel.
+     </para>
+
+     <programlisting>
+   KEY--.                 DATA--.
+        v                       v                  ! .update() may not be called
+    .setkey() -&gt; .init() -&gt; .update() -&gt; .export()   at all in this scenario.
+                             ^     |         |
+                             '-----'         '--&gt; PARTIAL_HASH
+
+   ----------- other transformations happen here -----------
+
+   PARTIAL_HASH--.   DATA1--.
+                 v          v
+             .import -&gt; .update() -&gt; .final()     ! .update() may not be called
+                         ^    |         |           at all in this scenario.
+                         '----'         '--&gt; HASH1
+
+   PARTIAL_HASH--.   DATA2-.
+                 v         v
+             .import -&gt; .finup()
+                           |
+                           '---------------&gt; HASH2
+     </programlisting>
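+
+     <para>
+      For reference, a minimal sketch of a synchronous hash definition;
+      all names and sizes below are placeholders and the callbacks are
+      assumed to be implemented by the driver:
+     </para>
+
+     <programlisting>
+static struct shash_alg my_shash_alg = {
+	.digestsize	= 20,
+	.init		= my_hash_init,
+	.update		= my_hash_update,
+	.final		= my_hash_final,
+	.descsize	= sizeof(struct my_hash_state),
+	.base		= {
+		.cra_name		= "myhash",
+		.cra_driver_name	= "myhash-generic",
+		.cra_priority		= 100,
+		.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
+		.cra_blocksize		= 64,
+		.cra_module		= THIS_MODULE,
+	}
+};
+     </programlisting>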
+    </sect2>
+
+    <sect2><title>Specifics Of Asynchronous HASH Transformation</title>
+     <para>
+      Some of the drivers will want to use the Generic ScatterWalk
+      in case the implementation needs to be fed separate chunks of the
+      scatterlist which contains the input data. The buffer containing
+      the resulting hash will always be properly aligned to
+      .cra_alignmask so there is no need to worry about this.
+     </para>
+    </sect2>
+   </sect1>
+  </chapter>
+
+  <chapter id="API"><title>Programming Interface</title>
+   <sect1><title>Block Cipher Context Data Structures</title>
+!Pinclude/linux/crypto.h Block Cipher Context Data Structures
+!Finclude/linux/crypto.h aead_request
+   </sect1>
+   <sect1><title>Block Cipher Algorithm Definitions</title>
+!Pinclude/linux/crypto.h Block Cipher Algorithm Definitions
+!Finclude/linux/crypto.h crypto_alg
+!Finclude/linux/crypto.h ablkcipher_alg
+!Finclude/linux/crypto.h aead_alg
+!Finclude/linux/crypto.h blkcipher_alg
+!Finclude/linux/crypto.h cipher_alg
+!Finclude/linux/crypto.h rng_alg
+   </sect1>
+   <sect1><title>Asynchronous Block Cipher API</title>
+!Pinclude/linux/crypto.h Asynchronous Block Cipher API
+!Finclude/linux/crypto.h crypto_alloc_ablkcipher
+!Finclude/linux/crypto.h crypto_free_ablkcipher
+!Finclude/linux/crypto.h crypto_has_ablkcipher
+!Finclude/linux/crypto.h crypto_ablkcipher_ivsize
+!Finclude/linux/crypto.h crypto_ablkcipher_blocksize
+!Finclude/linux/crypto.h crypto_ablkcipher_setkey
+!Finclude/linux/crypto.h crypto_ablkcipher_reqtfm
+!Finclude/linux/crypto.h crypto_ablkcipher_encrypt
+!Finclude/linux/crypto.h crypto_ablkcipher_decrypt
+   </sect1>
+   <sect1><title>Asynchronous Cipher Request Handle</title>
+!Pinclude/linux/crypto.h Asynchronous Cipher Request Handle
+!Finclude/linux/crypto.h crypto_ablkcipher_reqsize
+!Finclude/linux/crypto.h ablkcipher_request_set_tfm
+!Finclude/linux/crypto.h ablkcipher_request_alloc
+!Finclude/linux/crypto.h ablkcipher_request_free
+!Finclude/linux/crypto.h ablkcipher_request_set_callback
+!Finclude/linux/crypto.h ablkcipher_request_set_crypt
+   </sect1>
+   <sect1><title>Authenticated Encryption With Associated Data (AEAD) Cipher API</title>
+!Pinclude/linux/crypto.h Authenticated Encryption With Associated Data (AEAD) Cipher API
+!Finclude/linux/crypto.h crypto_alloc_aead
+!Finclude/linux/crypto.h crypto_free_aead
+!Finclude/linux/crypto.h crypto_aead_ivsize
+!Finclude/linux/crypto.h crypto_aead_authsize
+!Finclude/linux/crypto.h crypto_aead_blocksize
+!Finclude/linux/crypto.h crypto_aead_setkey
+!Finclude/linux/crypto.h crypto_aead_setauthsize
+!Finclude/linux/crypto.h crypto_aead_encrypt
+!Finclude/linux/crypto.h crypto_aead_decrypt
+   </sect1>
+   <sect1><title>Asynchronous AEAD Request Handle</title>
+!Pinclude/linux/crypto.h Asynchronous AEAD Request Handle
+!Finclude/linux/crypto.h crypto_aead_reqsize
+!Finclude/linux/crypto.h aead_request_set_tfm
+!Finclude/linux/crypto.h aead_request_alloc
+!Finclude/linux/crypto.h aead_request_free
+!Finclude/linux/crypto.h aead_request_set_callback
+!Finclude/linux/crypto.h aead_request_set_crypt
+!Finclude/linux/crypto.h aead_request_set_assoc
+   </sect1>
+   <sect1><title>Synchronous Block Cipher API</title>
+!Pinclude/linux/crypto.h Synchronous Block Cipher API
+!Finclude/linux/crypto.h crypto_alloc_blkcipher
+!Finclude/linux/crypto.h crypto_free_blkcipher
+!Finclude/linux/crypto.h crypto_has_blkcipher
+!Finclude/linux/crypto.h crypto_blkcipher_name
+!Finclude/linux/crypto.h crypto_blkcipher_ivsize
+!Finclude/linux/crypto.h crypto_blkcipher_blocksize
+!Finclude/linux/crypto.h crypto_blkcipher_setkey
+!Finclude/linux/crypto.h crypto_blkcipher_encrypt
+!Finclude/linux/crypto.h crypto_blkcipher_encrypt_iv
+!Finclude/linux/crypto.h crypto_blkcipher_decrypt
+!Finclude/linux/crypto.h crypto_blkcipher_decrypt_iv
+!Finclude/linux/crypto.h crypto_blkcipher_set_iv
+!Finclude/linux/crypto.h crypto_blkcipher_get_iv
+   </sect1>
+   <sect1><title>Single Block Cipher API</title>
+!Pinclude/linux/crypto.h Single Block Cipher API
+!Finclude/linux/crypto.h crypto_alloc_cipher
+!Finclude/linux/crypto.h crypto_free_cipher
+!Finclude/linux/crypto.h crypto_has_cipher
+!Finclude/linux/crypto.h crypto_cipher_blocksize
+!Finclude/linux/crypto.h crypto_cipher_setkey
+!Finclude/linux/crypto.h crypto_cipher_encrypt_one
+!Finclude/linux/crypto.h crypto_cipher_decrypt_one
+   </sect1>
+   <sect1><title>Synchronous Message Digest API</title>
+!Pinclude/linux/crypto.h Synchronous Message Digest API
+!Finclude/linux/crypto.h crypto_alloc_hash
+!Finclude/linux/crypto.h crypto_free_hash
+!Finclude/linux/crypto.h crypto_has_hash
+!Finclude/linux/crypto.h crypto_hash_blocksize
+!Finclude/linux/crypto.h crypto_hash_digestsize
+!Finclude/linux/crypto.h crypto_hash_init
+!Finclude/linux/crypto.h crypto_hash_update
+!Finclude/linux/crypto.h crypto_hash_final
+!Finclude/linux/crypto.h crypto_hash_digest
+!Finclude/linux/crypto.h crypto_hash_setkey
+   </sect1>
+   <sect1><title>Message Digest Algorithm Definitions</title>
+!Pinclude/crypto/hash.h Message Digest Algorithm Definitions
+!Finclude/crypto/hash.h hash_alg_common
+!Finclude/crypto/hash.h ahash_alg
+!Finclude/crypto/hash.h shash_alg
+   </sect1>
+   <sect1><title>Asynchronous Message Digest API</title>
+!Pinclude/crypto/hash.h Asynchronous Message Digest API
+!Finclude/crypto/hash.h crypto_alloc_ahash
+!Finclude/crypto/hash.h crypto_free_ahash
+!Finclude/crypto/hash.h crypto_ahash_init
+!Finclude/crypto/hash.h crypto_ahash_digestsize
+!Finclude/crypto/hash.h crypto_ahash_reqtfm
+!Finclude/crypto/hash.h crypto_ahash_reqsize
+!Finclude/crypto/hash.h crypto_ahash_setkey
+!Finclude/crypto/hash.h crypto_ahash_finup
+!Finclude/crypto/hash.h crypto_ahash_final
+!Finclude/crypto/hash.h crypto_ahash_digest
+!Finclude/crypto/hash.h crypto_ahash_export
+!Finclude/crypto/hash.h crypto_ahash_import
+   </sect1>
+   <sect1><title>Asynchronous Hash Request Handle</title>
+!Pinclude/crypto/hash.h Asynchronous Hash Request Handle
+!Finclude/crypto/hash.h ahash_request_set_tfm
+!Finclude/crypto/hash.h ahash_request_alloc
+!Finclude/crypto/hash.h ahash_request_free
+!Finclude/crypto/hash.h ahash_request_set_callback
+!Finclude/crypto/hash.h ahash_request_set_crypt
+   </sect1>
+   <sect1><title>Synchronous Message Digest API</title>
+!Pinclude/crypto/hash.h Synchronous Message Digest API
+!Finclude/crypto/hash.h crypto_alloc_shash
+!Finclude/crypto/hash.h crypto_free_shash
+!Finclude/crypto/hash.h crypto_shash_blocksize
+!Finclude/crypto/hash.h crypto_shash_digestsize
+!Finclude/crypto/hash.h crypto_shash_descsize
+!Finclude/crypto/hash.h crypto_shash_setkey
+!Finclude/crypto/hash.h crypto_shash_digest
+!Finclude/crypto/hash.h crypto_shash_export
+!Finclude/crypto/hash.h crypto_shash_import
+!Finclude/crypto/hash.h crypto_shash_init
+!Finclude/crypto/hash.h crypto_shash_update
+!Finclude/crypto/hash.h crypto_shash_final
+!Finclude/crypto/hash.h crypto_shash_finup
+   </sect1>
+   <sect1><title>Crypto API Random Number API</title>
+!Pinclude/crypto/rng.h Random number generator API
+!Finclude/crypto/rng.h crypto_alloc_rng
+!Finclude/crypto/rng.h crypto_rng_alg
+!Finclude/crypto/rng.h crypto_free_rng
+!Finclude/crypto/rng.h crypto_rng_get_bytes
+!Finclude/crypto/rng.h crypto_rng_reset
+!Finclude/crypto/rng.h crypto_rng_seedsize
+!Cinclude/crypto/rng.h
+   </sect1>
+  </chapter>
+
+  <chapter id="Code"><title>Code Examples</title>
+   <sect1><title>Code Example For Asynchronous Block Cipher Operation</title>
+    <programlisting>
+
+struct tcrypt_result {
+	struct completion completion;
+	int err;
+};
+
+/* tie all data structures together */
+struct ablkcipher_def {
+	struct scatterlist sg;
+	struct crypto_ablkcipher *tfm;
+	struct ablkcipher_request *req;
+	struct tcrypt_result result;
+};
+
+/* Callback function */
+static void test_ablkcipher_cb(struct crypto_async_request *req, int error)
+{
+	struct tcrypt_result *result = req-&gt;data;
+
+	if (error == -EINPROGRESS)
+		return;
+	result-&gt;err = error;
+	complete(&amp;result-&gt;completion);
+	pr_info("Encryption finished successfully\n");
+}
+
+/* Perform cipher operation */
+static unsigned int test_ablkcipher_encdec(struct ablkcipher_def *ablk,
+					   int enc)
+{
+	int rc = 0;
+
+	if (enc)
+		rc = crypto_ablkcipher_encrypt(ablk-&gt;req);
+	else
+		rc = crypto_ablkcipher_decrypt(ablk-&gt;req);
+
+	switch (rc) {
+	case 0:
+		break;
+	case -EINPROGRESS:
+	case -EBUSY:
+		rc = wait_for_completion_interruptible(
+			&amp;ablk-&gt;result.completion);
+		if (!rc &amp;&amp; !ablk-&gt;result.err) {
+			reinit_completion(&amp;ablk-&gt;result.completion);
+			break;
+		}
+	default:
+		pr_info("ablkcipher encrypt returned with %d result %d\n",
+		       rc, ablk-&gt;result.err);
+		break;
+	}
+	init_completion(&amp;ablk-&gt;result.completion);
+
+	return rc;
+}
+
+/* Initialize and trigger cipher operation */
+static int test_ablkcipher(void)
+{
+	struct ablkcipher_def ablk;
+	struct crypto_ablkcipher *ablkcipher = NULL;
+	struct ablkcipher_request *req = NULL;
+	char *scratchpad = NULL;
+	char *ivdata = NULL;
+	unsigned char key[32];
+	int ret = -EFAULT;
+
+	ablkcipher = crypto_alloc_ablkcipher("cbc-aes-aesni", 0, 0);
+	if (IS_ERR(ablkcipher)) {
+		pr_info("could not allocate ablkcipher handle\n");
+		return PTR_ERR(ablkcipher);
+	}
+
+	req = ablkcipher_request_alloc(ablkcipher, GFP_KERNEL);
+	if (!req) {
+		pr_info("could not allocate request queue\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+					test_ablkcipher_cb,
+					&amp;ablk.result);
+
+	/* AES 256 with random key */
+	get_random_bytes(&amp;key, 32);
+	if (crypto_ablkcipher_setkey(ablkcipher, key, 32)) {
+		pr_info("key could not be set\n");
+		ret = -EAGAIN;
+		goto out;
+	}
+
+	/* IV will be random */
+	ivdata = kmalloc(16, GFP_KERNEL);
+	if (!ivdata) {
+		pr_info("could not allocate ivdata\n");
+		goto out;
+	}
+	get_random_bytes(ivdata, 16);
+
+	/* Input data will be random */
+	scratchpad = kmalloc(16, GFP_KERNEL);
+	if (!scratchpad) {
+		pr_info("could not allocate scratchpad\n");
+		goto out;
+	}
+	get_random_bytes(scratchpad, 16);
+
+	ablk.tfm = ablkcipher;
+	ablk.req = req;
+
+	/* We encrypt one block */
+	sg_init_one(&amp;ablk.sg, scratchpad, 16);
+	ablkcipher_request_set_crypt(req, &amp;ablk.sg, &amp;ablk.sg, 16, ivdata);
+	init_completion(&amp;ablk.result.completion);
+
+	/* encrypt data */
+	ret = test_ablkcipher_encdec(&amp;ablk, 1);
+	if (ret)
+		goto out;
+
+	pr_info("Encryption triggered successfully\n");
+
+out:
+	if (ablkcipher)
+		crypto_free_ablkcipher(ablkcipher);
+	if (req)
+		ablkcipher_request_free(req);
+	if (ivdata)
+		kfree(ivdata);
+	if (scratchpad)
+		kfree(scratchpad);
+	return ret;
+}
+    </programlisting>
+   </sect1>
+
+   <sect1><title>Code Example For Synchronous Block Cipher Operation</title>
+    <programlisting>
+
+static int test_blkcipher(void)
+{
+	struct crypto_blkcipher *blkcipher = NULL;
+	char *cipher = "cbc(aes)";
+	// AES 128
+	char *key =
+"\x12\x34\x56\x78\x90\xab\xcd\xef\x12\x34\x56\x78\x90\xab\xcd\xef";
+	char *iv =
+"\x12\x34\x56\x78\x90\xab\xcd\xef\x12\x34\x56\x78\x90\xab\xcd\xef";
+	unsigned int ivsize = 0;
+	char *scratchpad = NULL; // holds plaintext and ciphertext
+	struct scatterlist sg;
+	struct blkcipher_desc desc;
+	int ret = -EFAULT;
+
+	blkcipher = crypto_alloc_blkcipher(cipher, 0, 0);
+	if (IS_ERR(blkcipher)) {
+		printk("could not allocate blkcipher handle for %s\n", cipher);
+		return PTR_ERR(blkcipher);
+	}
+
+	if (crypto_blkcipher_setkey(blkcipher, key, strlen(key))) {
+		printk("key could not be set\n");
+		ret = -EAGAIN;
+		goto out;
+	}
+
+	ivsize = crypto_blkcipher_ivsize(blkcipher);
+	if (ivsize) {
+		if (ivsize != strlen(iv))
+			printk("IV length differs from expected length\n");
+		crypto_blkcipher_set_iv(blkcipher, iv, ivsize);
+	}
+
+	scratchpad = kmalloc(crypto_blkcipher_blocksize(blkcipher), GFP_KERNEL);
+	if (!scratchpad) {
+		printk("could not allocate scratchpad for %s\n", cipher);
+		goto out;
+	}
+	/* get some random data that we want to encrypt */
+	get_random_bytes(scratchpad, crypto_blkcipher_blocksize(blkcipher));
+
+	desc.flags = 0;
+	desc.tfm = blkcipher;
+	sg_init_one(&amp;sg, scratchpad, crypto_blkcipher_blocksize(blkcipher));
+
+	/* encrypt data in place */
+	crypto_blkcipher_encrypt(&amp;desc, &amp;sg, &amp;sg,
+				 crypto_blkcipher_blocksize(blkcipher));
+
+	/* decrypt data in place
+	 * crypto_blkcipher_decrypt(&amp;desc, &amp;sg, &amp;sg,
+	 *			 crypto_blkcipher_blocksize(blkcipher));
+	 */
+
+	printk("Cipher operation completed\n");
+	ret = 0;
+
+out:
+	if (blkcipher)
+		crypto_free_blkcipher(blkcipher);
+	if (scratchpad)
+		kzfree(scratchpad);
+	return ret;
+}
+    </programlisting>
+   </sect1>
+
+   <sect1><title>Code Example For Use of Operational State Memory With SHASH</title>
+    <programlisting>
+
+struct sdesc {
+	struct shash_desc shash;
+	char ctx[];
+};
+
+static struct sdesc *init_sdesc(struct crypto_shash *alg)
+{
+	struct sdesc *sdesc;
+	int size;
+
+	size = sizeof(struct shash_desc) + crypto_shash_descsize(alg);
+	sdesc = kmalloc(size, GFP_KERNEL);
+	if (!sdesc)
+		return ERR_PTR(-ENOMEM);
+	sdesc-&gt;shash.tfm = alg;
+	sdesc-&gt;shash.flags = 0x0;
+	return sdesc;
+}
+
+static int calc_hash(struct crypto_shash *alg,
+		     const unsigned char *data, unsigned int datalen,
+		     unsigned char *digest)
+{
+	struct sdesc *sdesc;
+	int ret;
+
+	sdesc = init_sdesc(alg);
+	if (IS_ERR(sdesc)) {
+		pr_info("can't alloc sdesc\n");
+		return PTR_ERR(sdesc);
+	}
+
+	ret = crypto_shash_digest(&amp;sdesc-&gt;shash, data, datalen, digest);
+	kfree(sdesc);
+	return ret;
+}
+    </programlisting>
+   </sect1>
+
+   <sect1><title>Code Example For Random Number Generator Usage</title>
+    <programlisting>
+
+static int get_random_numbers(u8 *buf, unsigned int len)
+{
+	struct crypto_rng *rng = NULL;
+	char *drbg = "drbg_nopr_sha256"; /* Hash DRBG with SHA-256, no PR */
+	int ret;
+
+	if (!buf || !len) {
+		pr_debug("No output buffer provided\n");
+		return -EINVAL;
+	}
+
+	rng = crypto_alloc_rng(drbg, 0, 0);
+	if (IS_ERR(rng)) {
+		pr_debug("could not allocate RNG handle for %s\n", drbg);
+		return PTR_ERR(rng);
+	}
+
+	ret = crypto_rng_get_bytes(rng, buf, len);
+	if (ret &lt; 0)
+		pr_debug("generation of random numbers failed\n");
+	else if (ret == 0)
+		pr_debug("RNG returned no data");
+	else
+		pr_debug("RNG returned %d bytes of data\n", ret);
+
+	crypto_free_rng(rng);
+	return ret;
+}
+    </programlisting>
+   </sect1>
+  </chapter>
+ </book>
diff --git a/Documentation/crypto/crypto-API-userspace.txt b/Documentation/crypto/crypto-API-userspace.txt
new file mode 100644
index 0000000000000000000000000000000000000000..ac619cd9030004b24c3e47db53762a032e3c19d7
--- /dev/null
+++ b/Documentation/crypto/crypto-API-userspace.txt
@@ -0,0 +1,205 @@
+Introduction
+============
+
+The concepts of the kernel crypto API visible to kernel space are fully
+applicable to the user space interface as well. Therefore, the high-level
+discussion of the kernel crypto API for the in-kernel use cases applies here
+as well.
+
+The major difference, however, is that user space can only act as a consumer
+and never as a provider of a transformation or cipher algorithm.
+
+The following covers the user space interface exported by the kernel crypto
+API. A working example of this description is libkcapi, which can be obtained
+from [1]. That library can be used by user space applications that require
+cryptographic services from the kernel.
+
+Some aspects of the in-kernel crypto API do not
+apply to user space, however. This includes the difference between synchronous
+and asynchronous invocations: the user space API calls are fully synchronous.
+In addition, only a subset of all cipher types is available, as documented
+below.
+
+
+User space API general remarks
+==============================
+
+The kernel crypto API is accessible from user space. Currently, the following
+ciphers are accessible:
+
+	* Message digest including keyed message digest (HMAC, CMAC)
+
+	* Symmetric ciphers
+
+Note, AEAD ciphers are currently not supported via the symmetric cipher
+interface.
+
+The interface is provided via a socket interface using the address family
+AF_ALG. In addition, the setsockopt level is SOL_ALG. In case the user space
+header files do not export these constants yet, use the following macros:
+
+#ifndef AF_ALG
+#define AF_ALG 38
+#endif
+#ifndef SOL_ALG
+#define SOL_ALG 279
+#endif
+
+A cipher is accessed with the same name as done for the in-kernel API calls.
+This includes the generic vs. unique naming schema for ciphers as well as the
+enforcement of priorities for generic names.
+
+To interact with the kernel crypto API, an AF_ALG socket must be created by
+the user space application. User space invokes the cipher operation with the
+send/write system call family. The result of the cipher operation is obtained
+with the read/recv system call family.
+
+The following API calls assume that the AF_ALG socket descriptor is already
+opened by the user space application and discuss only the kernel crypto API
+specific invocations.
+
+To initialize an AF_ALG interface, the following sequence has to be performed
+by the consumer:
+
+	1. Create a socket of type AF_ALG with the struct sockaddr_alg parameter
+	   specified below for the different cipher types.
+
+	2. Invoke bind with the socket descriptor
+
+	3. Invoke accept with the socket descriptor. The accept system call
+	   returns a new file descriptor that is to be used to interact with
+	   the particular cipher instance. When invoking send/write or recv/read
+	   system calls to send data to the kernel or obtain data from the
+	   kernel, the file descriptor returned by accept must be used.
+
+In-place cipher operation
+=========================
+
+Just like the in-kernel operation of the kernel crypto API, the user space
+interface allows the cipher operation in-place. That means that the input buffer
+used for the send/write system call and the output buffer used by the read/recv
+system call may be one and the same. This is of particular interest for
+symmetric cipher operations where copying of the output data to its final
+destination can be avoided.
+
+If a consumer on the other hand wants to maintain the plaintext and the
+ciphertext in different memory locations, all a consumer needs to do is to
+provide different memory pointers for the encryption and decryption operation.
+
+Message digest API
+==================
+
+The message digest type to be used for the cipher operation is selected when
+invoking the bind syscall. bind requires the caller to provide a filled
+struct sockaddr_alg data structure. This data structure must be filled as follows:
+
+struct sockaddr_alg sa = {
+	.salg_family = AF_ALG,
+	.salg_type = "hash", /* this selects the hash logic in the kernel */
+	.salg_name = "sha1" /* this is the cipher name */
+};
+
+The salg_type value "hash" applies to message digests and keyed message digests.
+A keyed message digest, however, is referenced by the appropriate salg_name,
+such as hmac(sha1).
+Please see below for the setsockopt interface that explains how the key can be
+set for a keyed message digest.
+
+Using the send() system call, the application provides the data that should be
+processed with the message digest. The send system call allows the following
+flags to be specified:
+
+	* MSG_MORE: If this flag is set, the send system call acts like a
+		    message digest update function where the final hash is not
+		    yet calculated. If the flag is not set, the send system call
+		    calculates the final message digest immediately.
+
+With the recv() system call, the application can read the message digest from
+the kernel crypto API. If the buffer is too small for the message digest, the
+flag MSG_TRUNC is set by the kernel.
+
+In order to set a message digest key, the calling application must use the
+setsockopt() option of ALG_SET_KEY. If the key is not set the HMAC operation is
+performed without the initial HMAC state change caused by the key.
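+
+As a minimal sketch (error handling omitted), the following computes a SHA-1
+message digest over a short buffer using the interface described above:
+
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/socket.h>
+#include <linux/if_alg.h>
+
+int main(void)
+{
+	struct sockaddr_alg sa = {
+		.salg_family = AF_ALG,
+		.salg_type = "hash",
+		.salg_name = "sha1"
+	};
+	unsigned char digest[20]; /* SHA-1 digest size */
+	int tfmfd, opfd, i;
+
+	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
+	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
+	opfd = accept(tfmfd, NULL, 0);
+
+	/* no MSG_MORE: the final digest is calculated immediately */
+	send(opfd, "some data", 9, 0);
+
+	/* read the message digest */
+	read(opfd, digest, sizeof(digest));
+
+	for (i = 0; i < (int)sizeof(digest); i++)
+		printf("%02x", digest[i]);
+	printf("\n");
+
+	close(opfd);
+	close(tfmfd);
+	return 0;
+}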
+
+
+Symmetric cipher API
+====================
+
+The operation is very similar to the message digest discussion. During
+initialization, the struct sockaddr_alg data structure must be filled as follows:
+
+struct sockaddr_alg sa = {
+	.salg_family = AF_ALG,
+	.salg_type = "skcipher", /* this selects the symmetric cipher */
+	.salg_name = "cbc(aes)" /* this is the cipher name */
+};
+
+Before data can be sent to the kernel using the write/send system call family,
+the consumer must set the key. The key setting is described with the setsockopt
+invocation below.
+
+Using the sendmsg() system call, the application provides the data that should
+be processed for encryption or decryption. In addition, the IV is specified
+with the data structure provided by the sendmsg() system call.
+
+This additional information is provided as ancillary data (struct cmsghdr)
+embedded into the struct msghdr parameter of the sendmsg system call. See
+recv(2) and cmsg(3) for more information on how the cmsghdr data structure is
+used together with the send/recv system call family. The cmsghdr data
+structure holds the following information, specified in separate header
+instances:
+
+	* specification of the cipher operation type with one of these flags:
+		ALG_OP_ENCRYPT - encryption of data
+		ALG_OP_DECRYPT - decryption of data
+
+	* specification of the IV information marked with the flag ALG_SET_IV
+
+The send system call family allows the following flag to be specified:
+
+	* MSG_MORE: If this flag is set, the send system call acts like a
+		    cipher update function where more input data is expected
+		    with a subsequent invocation of the send system call.
+
+Note: The kernel reports -EINVAL for any unexpected data. The caller must
+make sure that all data matches the constraints given in /proc/crypto for the
+selected cipher.
+
+With the recv() system call, the application can read the result of the
+cipher operation from the kernel crypto API. The output buffer must be large
+enough to hold all blocks of the encrypted or decrypted data. If the output
+buffer is smaller, only as many blocks are returned as fit into that
+output buffer size.
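+
+As a minimal sketch (error handling omitted), the following encrypts a single
+16-byte block with cbc(aes); the helper name encrypt_one_block is only
+illustrative, and ALG_SET_KEY, ALG_SET_IV, ALG_SET_OP, ALG_OP_ENCRYPT and
+struct af_alg_iv are taken from linux/if_alg.h:
+
+#include <string.h>
+#include <unistd.h>
+#include <sys/socket.h>
+#include <sys/uio.h>
+#include <linux/if_alg.h>
+
+int encrypt_one_block(const unsigned char *key, /* 16 bytes */
+		      const unsigned char *iv,  /* 16 bytes */
+		      const unsigned char *in,  /* 16 bytes */
+		      unsigned char *out)       /* 16 bytes */
+{
+	struct sockaddr_alg sa = {
+		.salg_family = AF_ALG,
+		.salg_type = "skcipher",
+		.salg_name = "cbc(aes)"
+	};
+	struct msghdr msg = {0};
+	struct cmsghdr *cmsg;
+	char cbuf[CMSG_SPACE(4) + CMSG_SPACE(4 + 16)] = {0};
+	struct af_alg_iv *alg_iv;
+	struct iovec iov;
+	int tfmfd, opfd;
+
+	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
+	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
+	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, 16);
+	opfd = accept(tfmfd, NULL, 0);
+
+	msg.msg_control = cbuf;
+	msg.msg_controllen = sizeof(cbuf);
+
+	/* first cmsghdr: select the operation type */
+	cmsg = CMSG_FIRSTHDR(&msg);
+	cmsg->cmsg_level = SOL_ALG;
+	cmsg->cmsg_type = ALG_SET_OP;
+	cmsg->cmsg_len = CMSG_LEN(4);
+	*(__u32 *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;
+
+	/* second cmsghdr: provide the IV */
+	cmsg = CMSG_NXTHDR(&msg, cmsg);
+	cmsg->cmsg_level = SOL_ALG;
+	cmsg->cmsg_type = ALG_SET_IV;
+	cmsg->cmsg_len = CMSG_LEN(4 + 16);
+	alg_iv = (void *)CMSG_DATA(cmsg);
+	alg_iv->ivlen = 16;
+	memcpy(alg_iv->iv, iv, 16);
+
+	/* the plaintext is passed as the regular data payload */
+	iov.iov_base = (void *)in;
+	iov.iov_len = 16;
+	msg.msg_iov = &iov;
+	msg.msg_iovlen = 1;
+
+	sendmsg(opfd, &msg, 0);
+	read(opfd, out, 16);
+
+	close(opfd);
+	close(tfmfd);
+	return 0;
+}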
+
+Setsockopt interface
+====================
+
+In addition to the read/recv and send/write system call handling to send and
+retrieve data subject to the cipher operation, a consumer also needs to set
+the additional information for the cipher operation. This additional information
+is set using the setsockopt system call that must be invoked with the file
+descriptor of the open cipher (i.e. the file descriptor returned by the
+accept system call).
+
+Each setsockopt invocation must use the level SOL_ALG.
+
+The setsockopt interface allows setting the following data using the mentioned
+optname:
+
+	* ALG_SET_KEY -- Setting the key. Key setting is applicable to:
+
+		- the skcipher cipher type (symmetric ciphers)
+
+		- the hash cipher type (keyed message digests)
+
+User space API example
+======================
+
+Please see [1] for libkcapi which provides an easy-to-use wrapper around the
+aforementioned AF_ALG kernel interface. [1] also contains a test application
+that invokes all libkcapi API calls.
+
+[1] http://www.chronox.de/libkcapi.html
+
+Author
+======
+
+Stephan Mueller <smueller@chronox.de>
diff --git a/Documentation/devicetree/bindings/crypto/fsl-imx-sahara.txt b/Documentation/devicetree/bindings/crypto/fsl-imx-sahara.txt
index 5c65eccd0e56b3a58b611bac9da4b6f9f40a52d8..e8a35c71e9478c76db058ac1c6cf6c921bc919ad 100644
--- a/Documentation/devicetree/bindings/crypto/fsl-imx-sahara.txt
+++ b/Documentation/devicetree/bindings/crypto/fsl-imx-sahara.txt
@@ -1,5 +1,5 @@
 Freescale SAHARA Cryptographic Accelerator included in some i.MX chips.
-Currently only i.MX27 is supported.
+Currently only i.MX27 and i.MX53 are supported.
 
 Required properties:
 - compatible : Should be "fsl,<soc>-sahara"
diff --git a/Documentation/devicetree/bindings/hwrng/atmel-trng.txt b/Documentation/devicetree/bindings/hwrng/atmel-trng.txt
new file mode 100644
index 0000000000000000000000000000000000000000..4ac5aaa2d024147e5cb88c812a6b92f710b0b6a0
--- /dev/null
+++ b/Documentation/devicetree/bindings/hwrng/atmel-trng.txt
@@ -0,0 +1,16 @@
+Atmel TRNG (True Random Number Generator) block
+
+Required properties:
+- compatible : Should be "atmel,at91sam9g45-trng"
+- reg : Offset and length of the register set of this block
+- interrupts : the interrupt number for the TRNG block
+- clocks: should contain the TRNG clk source
+
+Example:
+
+trng@fffcc000 {
+	compatible = "atmel,at91sam9g45-trng";
+	reg = <0xfffcc000 0x4000>;
+	interrupts = <6 IRQ_TYPE_LEVEL_HIGH 0>;
+	clocks = <&trng_clk>;
+};
diff --git a/arch/arm/boot/dts/at91sam9g45.dtsi b/arch/arm/boot/dts/at91sam9g45.dtsi
index d3f65130a1f8b5222d9e920d2713fcc0cf60ca3e..6c0637a4bda56eb73b433a11473d806249e01a70 100644
--- a/arch/arm/boot/dts/at91sam9g45.dtsi
+++ b/arch/arm/boot/dts/at91sam9g45.dtsi
@@ -940,6 +940,13 @@
 				status = "disabled";
 			};
 
+			trng@fffcc000 {
+				compatible = "atmel,at91sam9g45-trng";
+				reg = <0xfffcc000 0x4000>;
+				interrupts = <6 IRQ_TYPE_LEVEL_HIGH 0>;
+				clocks = <&trng_clk>;
+			};
+
 			i2c0: i2c@fff84000 {
 				compatible = "atmel,at91sam9g10-i2c";
 				reg = <0xfff84000 0x100>;
diff --git a/arch/arm/crypto/aes_glue.c b/arch/arm/crypto/aes_glue.c
index 3003fa1f6fb4b9395c77340fbf83011b5cb0e419..0409b8f897823f815af5581f0c9a73e29f80eaf8 100644
--- a/arch/arm/crypto/aes_glue.c
+++ b/arch/arm/crypto/aes_glue.c
@@ -93,6 +93,6 @@ module_exit(aes_fini);
 
 MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm (ASM)");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("aes");
-MODULE_ALIAS("aes-asm");
+MODULE_ALIAS_CRYPTO("aes");
+MODULE_ALIAS_CRYPTO("aes-asm");
 MODULE_AUTHOR("David McCullough <ucdevel@gmail.com>");
diff --git a/arch/arm/crypto/sha1_glue.c b/arch/arm/crypto/sha1_glue.c
index 84f2a756588be55ed6842f1120a562a75c31e8bb..e31b0440c6139dc932b0efd4b98aaab077cb862d 100644
--- a/arch/arm/crypto/sha1_glue.c
+++ b/arch/arm/crypto/sha1_glue.c
@@ -171,5 +171,5 @@ module_exit(sha1_mod_fini);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm (ARM)");
-MODULE_ALIAS("sha1");
+MODULE_ALIAS_CRYPTO("sha1");
 MODULE_AUTHOR("David McCullough <ucdevel@gmail.com>");
diff --git a/arch/arm/crypto/sha1_neon_glue.c b/arch/arm/crypto/sha1_neon_glue.c
index 6f1b411b1d55440b62ff7959b15ac7c9e9cb09d5..0b0083757d477f4aef8bca9e82cfb155b6eecbfb 100644
--- a/arch/arm/crypto/sha1_neon_glue.c
+++ b/arch/arm/crypto/sha1_neon_glue.c
@@ -194,4 +194,4 @@ module_exit(sha1_neon_mod_fini);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, NEON accelerated");
-MODULE_ALIAS("sha1");
+MODULE_ALIAS_CRYPTO("sha1");
diff --git a/arch/arm/crypto/sha512_neon_glue.c b/arch/arm/crypto/sha512_neon_glue.c
index 0d2758ff5e12ec903ca9fca031f229dd1501b00e..b124dce838d6e0d75d8429450d4972992251952c 100644
--- a/arch/arm/crypto/sha512_neon_glue.c
+++ b/arch/arm/crypto/sha512_neon_glue.c
@@ -241,7 +241,7 @@ static int sha384_neon_final(struct shash_desc *desc, u8 *hash)
 	sha512_neon_final(desc, D);
 
 	memcpy(hash, D, SHA384_DIGEST_SIZE);
-	memset(D, 0, SHA512_DIGEST_SIZE);
+	memzero_explicit(D, SHA512_DIGEST_SIZE);
 
 	return 0;
 }
@@ -301,5 +301,5 @@ module_exit(sha512_neon_mod_fini);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("SHA512 Secure Hash Algorithm, NEON accelerated");
 
-MODULE_ALIAS("sha512");
-MODULE_ALIAS("sha384");
+MODULE_ALIAS_CRYPTO("sha512");
+MODULE_ALIAS_CRYPTO("sha384");
diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig
index a38b02ce5f9a9c68af825cfc462382e2f2a9c836..2cf32e9887e1b292a0ec9102761ce3a200b8fa0d 100644
--- a/arch/arm64/crypto/Kconfig
+++ b/arch/arm64/crypto/Kconfig
@@ -49,4 +49,8 @@ config CRYPTO_AES_ARM64_NEON_BLK
 	select CRYPTO_AES
 	select CRYPTO_ABLK_HELPER
 
+config CRYPTO_CRC32_ARM64
+	tristate "CRC32 and CRC32C using optional ARMv8 instructions"
+	depends on ARM64
+	select CRYPTO_HASH
 endif
diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile
index a3f935fde97527d2189fb7f6afffc9dc5102ba57..5720608c50b1b7f969f881b8695a78742868f379 100644
--- a/arch/arm64/crypto/Makefile
+++ b/arch/arm64/crypto/Makefile
@@ -34,5 +34,9 @@ AFLAGS_aes-neon.o	:= -DINTERLEAVE=4
 
 CFLAGS_aes-glue-ce.o	:= -DUSE_V8_CRYPTO_EXTENSIONS
 
+obj-$(CONFIG_CRYPTO_CRC32_ARM64) += crc32-arm64.o
+
+CFLAGS_crc32-arm64.o	:= -mcpu=generic+crc
+
 $(obj)/aes-glue-%.o: $(src)/aes-glue.c FORCE
 	$(call if_changed_rule,cc_o_c)
diff --git a/arch/arm64/crypto/aes-ce-ccm-glue.c b/arch/arm64/crypto/aes-ce-ccm-glue.c
index 0ac73b838fa32ca58561b6b9c7f73d10d824daa9..6c348df5bf36d8d9c54b88b3e7be900e754357ab 100644
--- a/arch/arm64/crypto/aes-ce-ccm-glue.c
+++ b/arch/arm64/crypto/aes-ce-ccm-glue.c
@@ -296,4 +296,4 @@ module_exit(aes_mod_exit);
 MODULE_DESCRIPTION("Synchronous AES in CCM mode using ARMv8 Crypto Extensions");
 MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
 MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("ccm(aes)");
+MODULE_ALIAS_CRYPTO("ccm(aes)");
diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
index 801aae32841f2513ba665034ae4817945debb781..b1b5b893eb20af572484322bbe9c81ee2c12bf62 100644
--- a/arch/arm64/crypto/aes-glue.c
+++ b/arch/arm64/crypto/aes-glue.c
@@ -44,10 +44,10 @@ MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 Crypto Extensions");
 #define aes_xts_encrypt		neon_aes_xts_encrypt
 #define aes_xts_decrypt		neon_aes_xts_decrypt
 MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 NEON");
-MODULE_ALIAS("ecb(aes)");
-MODULE_ALIAS("cbc(aes)");
-MODULE_ALIAS("ctr(aes)");
-MODULE_ALIAS("xts(aes)");
+MODULE_ALIAS_CRYPTO("ecb(aes)");
+MODULE_ALIAS_CRYPTO("cbc(aes)");
+MODULE_ALIAS_CRYPTO("ctr(aes)");
+MODULE_ALIAS_CRYPTO("xts(aes)");
 #endif
 
 MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
diff --git a/arch/arm64/crypto/crc32-arm64.c b/arch/arm64/crypto/crc32-arm64.c
new file mode 100644
index 0000000000000000000000000000000000000000..9499199924aebd4d6b27ced325d69918b641ddf3
--- /dev/null
+++ b/arch/arm64/crypto/crc32-arm64.c
@@ -0,0 +1,274 @@
+/*
+ * crc32-arm64.c - CRC32 and CRC32C using optional ARMv8 instructions
+ *
+ * Module based on crypto/crc32c_generic.c
+ *
+ * CRC32 loop taken from Ed Nevill's Hadoop CRC patch
+ * http://mail-archives.apache.org/mod_mbox/hadoop-common-dev/201406.mbox/%3C1403687030.3355.19.camel%40localhost.localdomain%3E
+ *
+ * Using inline assembly instead of intrinsics in order to be backwards
+ * compatible with older compilers.
+ *
+ * Copyright (C) 2014 Linaro Ltd <yazen.ghannam@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/unaligned/access_ok.h>
+#include <linux/cpufeature.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+
+#include <crypto/internal/hash.h>
+
+MODULE_AUTHOR("Yazen Ghannam <yazen.ghannam@linaro.org>");
+MODULE_DESCRIPTION("CRC32 and CRC32C using optional ARMv8 instructions");
+MODULE_LICENSE("GPL v2");
+
+#define CRC32X(crc, value) __asm__("crc32x %w[c], %w[c], %x[v]":[c]"+r"(crc):[v]"r"(value))
+#define CRC32W(crc, value) __asm__("crc32w %w[c], %w[c], %w[v]":[c]"+r"(crc):[v]"r"(value))
+#define CRC32H(crc, value) __asm__("crc32h %w[c], %w[c], %w[v]":[c]"+r"(crc):[v]"r"(value))
+#define CRC32B(crc, value) __asm__("crc32b %w[c], %w[c], %w[v]":[c]"+r"(crc):[v]"r"(value))
+#define CRC32CX(crc, value) __asm__("crc32cx %w[c], %w[c], %x[v]":[c]"+r"(crc):[v]"r"(value))
+#define CRC32CW(crc, value) __asm__("crc32cw %w[c], %w[c], %w[v]":[c]"+r"(crc):[v]"r"(value))
+#define CRC32CH(crc, value) __asm__("crc32ch %w[c], %w[c], %w[v]":[c]"+r"(crc):[v]"r"(value))
+#define CRC32CB(crc, value) __asm__("crc32cb %w[c], %w[c], %w[v]":[c]"+r"(crc):[v]"r"(value))
+
+static u32 crc32_arm64_le_hw(u32 crc, const u8 *p, unsigned int len)
+{
+	s64 length = len;
+
+	while ((length -= sizeof(u64)) >= 0) {
+		CRC32X(crc, get_unaligned_le64(p));
+		p += sizeof(u64);
+	}
+
+	/* The following is more efficient than the straight loop */
+	if (length & sizeof(u32)) {
+		CRC32W(crc, get_unaligned_le32(p));
+		p += sizeof(u32);
+	}
+	if (length & sizeof(u16)) {
+		CRC32H(crc, get_unaligned_le16(p));
+		p += sizeof(u16);
+	}
+	if (length & sizeof(u8))
+		CRC32B(crc, *p);
+
+	return crc;
+}
+
+static u32 crc32c_arm64_le_hw(u32 crc, const u8 *p, unsigned int len)
+{
+	s64 length = len;
+
+	while ((length -= sizeof(u64)) >= 0) {
+		CRC32CX(crc, get_unaligned_le64(p));
+		p += sizeof(u64);
+	}
+
+	/* The following is more efficient than the straight loop */
+	if (length & sizeof(u32)) {
+		CRC32CW(crc, get_unaligned_le32(p));
+		p += sizeof(u32);
+	}
+	if (length & sizeof(u16)) {
+		CRC32CH(crc, get_unaligned_le16(p));
+		p += sizeof(u16);
+	}
+	if (length & sizeof(u8))
+		CRC32CB(crc, *p);
+
+	return crc;
+}
+
+#define CHKSUM_BLOCK_SIZE	1
+#define CHKSUM_DIGEST_SIZE	4
+
+struct chksum_ctx {
+	u32 key;
+};
+
+struct chksum_desc_ctx {
+	u32 crc;
+};
+
+static int chksum_init(struct shash_desc *desc)
+{
+	struct chksum_ctx *mctx = crypto_shash_ctx(desc->tfm);
+	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
+
+	ctx->crc = mctx->key;
+
+	return 0;
+}
+
+/*
+ * Setting the seed allows arbitrary accumulators and flexible XOR policy
+ * If your algorithm starts with ~0, then XOR with ~0 before you set
+ * the seed.
+ */
+static int chksum_setkey(struct crypto_shash *tfm, const u8 *key,
+			 unsigned int keylen)
+{
+	struct chksum_ctx *mctx = crypto_shash_ctx(tfm);
+
+	if (keylen != sizeof(mctx->key)) {
+		crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+	mctx->key = get_unaligned_le32(key);
+	return 0;
+}
+
+static int chksum_update(struct shash_desc *desc, const u8 *data,
+			 unsigned int length)
+{
+	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
+
+	ctx->crc = crc32_arm64_le_hw(ctx->crc, data, length);
+	return 0;
+}
+
+static int chksumc_update(struct shash_desc *desc, const u8 *data,
+			 unsigned int length)
+{
+	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
+
+	ctx->crc = crc32c_arm64_le_hw(ctx->crc, data, length);
+	return 0;
+}
+
+static int chksum_final(struct shash_desc *desc, u8 *out)
+{
+	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
+
+	put_unaligned_le32(~ctx->crc, out);
+	return 0;
+}
+
+static int __chksum_finup(u32 crc, const u8 *data, unsigned int len, u8 *out)
+{
+	put_unaligned_le32(~crc32_arm64_le_hw(crc, data, len), out);
+	return 0;
+}
+
+static int __chksumc_finup(u32 crc, const u8 *data, unsigned int len, u8 *out)
+{
+	put_unaligned_le32(~crc32c_arm64_le_hw(crc, data, len), out);
+	return 0;
+}
+
+static int chksum_finup(struct shash_desc *desc, const u8 *data,
+			unsigned int len, u8 *out)
+{
+	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
+
+	return __chksum_finup(ctx->crc, data, len, out);
+}
+
+static int chksumc_finup(struct shash_desc *desc, const u8 *data,
+			unsigned int len, u8 *out)
+{
+	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
+
+	return __chksumc_finup(ctx->crc, data, len, out);
+}
+
+static int chksum_digest(struct shash_desc *desc, const u8 *data,
+			 unsigned int length, u8 *out)
+{
+	struct chksum_ctx *mctx = crypto_shash_ctx(desc->tfm);
+
+	return __chksum_finup(mctx->key, data, length, out);
+}
+
+static int chksumc_digest(struct shash_desc *desc, const u8 *data,
+			 unsigned int length, u8 *out)
+{
+	struct chksum_ctx *mctx = crypto_shash_ctx(desc->tfm);
+
+	return __chksumc_finup(mctx->key, data, length, out);
+}
+
+static int crc32_cra_init(struct crypto_tfm *tfm)
+{
+	struct chksum_ctx *mctx = crypto_tfm_ctx(tfm);
+
+	mctx->key = ~0;
+	return 0;
+}
+
+static struct shash_alg crc32_alg = {
+	.digestsize		=	CHKSUM_DIGEST_SIZE,
+	.setkey			=	chksum_setkey,
+	.init			=	chksum_init,
+	.update			=	chksum_update,
+	.final			=	chksum_final,
+	.finup			=	chksum_finup,
+	.digest			=	chksum_digest,
+	.descsize		=	sizeof(struct chksum_desc_ctx),
+	.base			=	{
+		.cra_name		=	"crc32",
+		.cra_driver_name	=	"crc32-arm64-hw",
+		.cra_priority		=	300,
+		.cra_blocksize		=	CHKSUM_BLOCK_SIZE,
+		.cra_alignmask		=	0,
+		.cra_ctxsize		=	sizeof(struct chksum_ctx),
+		.cra_module		=	THIS_MODULE,
+		.cra_init		=	crc32_cra_init,
+	}
+};
+
+static struct shash_alg crc32c_alg = {
+	.digestsize		=	CHKSUM_DIGEST_SIZE,
+	.setkey			=	chksum_setkey,
+	.init			=	chksum_init,
+	.update			=	chksumc_update,
+	.final			=	chksum_final,
+	.finup			=	chksumc_finup,
+	.digest			=	chksumc_digest,
+	.descsize		=	sizeof(struct chksum_desc_ctx),
+	.base			=	{
+		.cra_name		=	"crc32c",
+		.cra_driver_name	=	"crc32c-arm64-hw",
+		.cra_priority		=	300,
+		.cra_blocksize		=	CHKSUM_BLOCK_SIZE,
+		.cra_alignmask		=	0,
+		.cra_ctxsize		=	sizeof(struct chksum_ctx),
+		.cra_module		=	THIS_MODULE,
+		.cra_init		=	crc32_cra_init,
+	}
+};
+
+static int __init crc32_mod_init(void)
+{
+	int err;
+
+	err = crypto_register_shash(&crc32_alg);
+
+	if (err)
+		return err;
+
+	err = crypto_register_shash(&crc32c_alg);
+
+	if (err) {
+		crypto_unregister_shash(&crc32_alg);
+		return err;
+	}
+
+	return 0;
+}
+
+static void __exit crc32_mod_exit(void)
+{
+	crypto_unregister_shash(&crc32_alg);
+	crypto_unregister_shash(&crc32c_alg);
+}
+
+module_cpu_feature_match(CRC32, crc32_mod_init);
+module_exit(crc32_mod_exit);
diff --git a/arch/powerpc/crypto/sha1.c b/arch/powerpc/crypto/sha1.c
index f9e8b9491efc2736afc66a14f65777ed43d976c3..d3feba5a275f6465fa46b850ed57cb688bd834ea 100644
--- a/arch/powerpc/crypto/sha1.c
+++ b/arch/powerpc/crypto/sha1.c
@@ -66,7 +66,7 @@ static int sha1_update(struct shash_desc *desc, const u8 *data,
 			src = data + done;
 		} while (done + 63 < len);
 
-		memset(temp, 0, sizeof(temp));
+		memzero_explicit(temp, sizeof(temp));
 		partial = 0;
 	}
 	memcpy(sctx->buffer + partial, src, len - done);
@@ -154,4 +154,4 @@ module_exit(sha1_powerpc_mod_fini);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm");
 
-MODULE_ALIAS("sha1-powerpc");
+MODULE_ALIAS_CRYPTO("sha1-powerpc");
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index 23223cd63e54811d9ab85823df1e03303c88036e..1f272b24fc0bf9393d44939ea2b225a84e36ea2a 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -979,7 +979,7 @@ static void __exit aes_s390_fini(void)
 module_init(aes_s390_init);
 module_exit(aes_s390_fini);
 
-MODULE_ALIAS("aes-all");
+MODULE_ALIAS_CRYPTO("aes-all");
 
 MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
 MODULE_LICENSE("GPL");
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
index 7acb77f7ef1ada0183280c07e75f2c34579bac72..9e05cc453a40d5fd946ecf56f1063f2446812695 100644
--- a/arch/s390/crypto/des_s390.c
+++ b/arch/s390/crypto/des_s390.c
@@ -619,8 +619,8 @@ static void __exit des_s390_exit(void)
 module_init(des_s390_init);
 module_exit(des_s390_exit);
 
-MODULE_ALIAS("des");
-MODULE_ALIAS("des3_ede");
+MODULE_ALIAS_CRYPTO("des");
+MODULE_ALIAS_CRYPTO("des3_ede");
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms");
diff --git a/arch/s390/crypto/ghash_s390.c b/arch/s390/crypto/ghash_s390.c
index d43485d142e911623d30d5abb5bd80d957f752d4..7940dc90e80bc6729371ab565bad743b1087ef72 100644
--- a/arch/s390/crypto/ghash_s390.c
+++ b/arch/s390/crypto/ghash_s390.c
@@ -160,7 +160,7 @@ static void __exit ghash_mod_exit(void)
 module_init(ghash_mod_init);
 module_exit(ghash_mod_exit);
 
-MODULE_ALIAS("ghash");
+MODULE_ALIAS_CRYPTO("ghash");
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("GHASH Message Digest Algorithm, s390 implementation");
diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c
index a1b3a9dc9d8a0f420f6389bd56331890e259ab6b..5b2bee323694b2144c382dd9af85126b81b311a8 100644
--- a/arch/s390/crypto/sha1_s390.c
+++ b/arch/s390/crypto/sha1_s390.c
@@ -103,6 +103,6 @@ static void __exit sha1_s390_fini(void)
 module_init(sha1_s390_init);
 module_exit(sha1_s390_fini);
 
-MODULE_ALIAS("sha1");
+MODULE_ALIAS_CRYPTO("sha1");
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm");
diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c
index 9b853809a492b68e6781ba6a8c32e5a60fe78932..b74ff158108c9421a25b26a02f3a1f1669d1f69d 100644
--- a/arch/s390/crypto/sha256_s390.c
+++ b/arch/s390/crypto/sha256_s390.c
@@ -143,7 +143,7 @@ static void __exit sha256_s390_fini(void)
 module_init(sha256_s390_init);
 module_exit(sha256_s390_fini);
 
-MODULE_ALIAS("sha256");
-MODULE_ALIAS("sha224");
+MODULE_ALIAS_CRYPTO("sha256");
+MODULE_ALIAS_CRYPTO("sha224");
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("SHA256 and SHA224 Secure Hash Algorithm");
diff --git a/arch/s390/crypto/sha512_s390.c b/arch/s390/crypto/sha512_s390.c
index 32a81383b69c1b5036c12494802654ad453cbb19..0c36989ba182b1e411b56c3018ab610622cfe6ff 100644
--- a/arch/s390/crypto/sha512_s390.c
+++ b/arch/s390/crypto/sha512_s390.c
@@ -86,7 +86,7 @@ static struct shash_alg sha512_alg = {
 	}
 };
 
-MODULE_ALIAS("sha512");
+MODULE_ALIAS_CRYPTO("sha512");
 
 static int sha384_init(struct shash_desc *desc)
 {
@@ -126,7 +126,7 @@ static struct shash_alg sha384_alg = {
 	}
 };
 
-MODULE_ALIAS("sha384");
+MODULE_ALIAS_CRYPTO("sha384");
 
 static int __init init(void)
 {
diff --git a/arch/sparc/crypto/aes_glue.c b/arch/sparc/crypto/aes_glue.c
index df922f52d76dd6f5173531a1e48b010003c68f36..705408766ab0c07adcdd3d9e12c69b3bd3a9a20b 100644
--- a/arch/sparc/crypto/aes_glue.c
+++ b/arch/sparc/crypto/aes_glue.c
@@ -499,6 +499,6 @@ module_exit(aes_sparc64_mod_fini);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("AES Secure Hash Algorithm, sparc64 aes opcode accelerated");
 
-MODULE_ALIAS("aes");
+MODULE_ALIAS_CRYPTO("aes");
 
 #include "crop_devid.c"
diff --git a/arch/sparc/crypto/camellia_glue.c b/arch/sparc/crypto/camellia_glue.c
index 888f6260b4ec5169d36c23860a6f0bc70c5f5404..641f55cb61c3a89a9a8eabe12577170a6e62e572 100644
--- a/arch/sparc/crypto/camellia_glue.c
+++ b/arch/sparc/crypto/camellia_glue.c
@@ -322,6 +322,6 @@ module_exit(camellia_sparc64_mod_fini);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Camellia Cipher Algorithm, sparc64 camellia opcode accelerated");
 
-MODULE_ALIAS("aes");
+MODULE_ALIAS_CRYPTO("aes");
 
 #include "crop_devid.c"
diff --git a/arch/sparc/crypto/crc32c_glue.c b/arch/sparc/crypto/crc32c_glue.c
index 5162fad912ce09faf6fcf5979e2ef60cadc03ba4..d1064e46efe8bea0b83d573e4b6eb0e35aef3596 100644
--- a/arch/sparc/crypto/crc32c_glue.c
+++ b/arch/sparc/crypto/crc32c_glue.c
@@ -176,6 +176,6 @@ module_exit(crc32c_sparc64_mod_fini);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("CRC32c (Castagnoli), sparc64 crc32c opcode accelerated");
 
-MODULE_ALIAS("crc32c");
+MODULE_ALIAS_CRYPTO("crc32c");
 
 #include "crop_devid.c"
diff --git a/arch/sparc/crypto/des_glue.c b/arch/sparc/crypto/des_glue.c
index 3065bc61f9d3bcf2ec96e78a2b3a15d96f981e14..d1150097299479224e99f143e6657422cd4db157 100644
--- a/arch/sparc/crypto/des_glue.c
+++ b/arch/sparc/crypto/des_glue.c
@@ -532,6 +532,6 @@ module_exit(des_sparc64_mod_fini);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms, sparc64 des opcode accelerated");
 
-MODULE_ALIAS("des");
+MODULE_ALIAS_CRYPTO("des");
 
 #include "crop_devid.c"
diff --git a/arch/sparc/crypto/md5_glue.c b/arch/sparc/crypto/md5_glue.c
index 09a9ea1dfb697381a410cdaf043262d2e4e24898..64c7ff5f72a9f68fd19832eb6d4238e05b80c114 100644
--- a/arch/sparc/crypto/md5_glue.c
+++ b/arch/sparc/crypto/md5_glue.c
@@ -185,6 +185,6 @@ module_exit(md5_sparc64_mod_fini);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("MD5 Secure Hash Algorithm, sparc64 md5 opcode accelerated");
 
-MODULE_ALIAS("md5");
+MODULE_ALIAS_CRYPTO("md5");
 
 #include "crop_devid.c"
diff --git a/arch/sparc/crypto/sha1_glue.c b/arch/sparc/crypto/sha1_glue.c
index 6cd5f29e1e0d592050602afff567598d1062b713..1b3e47accc7466a90fb5729321df4e48273d7f31 100644
--- a/arch/sparc/crypto/sha1_glue.c
+++ b/arch/sparc/crypto/sha1_glue.c
@@ -180,6 +180,6 @@ module_exit(sha1_sparc64_mod_fini);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, sparc64 sha1 opcode accelerated");
 
-MODULE_ALIAS("sha1");
+MODULE_ALIAS_CRYPTO("sha1");
 
 #include "crop_devid.c"
diff --git a/arch/sparc/crypto/sha256_glue.c b/arch/sparc/crypto/sha256_glue.c
index 04f555ab268002d16ca0d1d1bf69e6e8829d4d04..285268ca927937434877eeb39a1c1fcc307b0682 100644
--- a/arch/sparc/crypto/sha256_glue.c
+++ b/arch/sparc/crypto/sha256_glue.c
@@ -135,7 +135,7 @@ static int sha224_sparc64_final(struct shash_desc *desc, u8 *hash)
 	sha256_sparc64_final(desc, D);
 
 	memcpy(hash, D, SHA224_DIGEST_SIZE);
-	memset(D, 0, SHA256_DIGEST_SIZE);
+	memzero_explicit(D, SHA256_DIGEST_SIZE);
 
 	return 0;
 }
@@ -237,7 +237,7 @@ module_exit(sha256_sparc64_mod_fini);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm, sparc64 sha256 opcode accelerated");
 
-MODULE_ALIAS("sha224");
-MODULE_ALIAS("sha256");
+MODULE_ALIAS_CRYPTO("sha224");
+MODULE_ALIAS_CRYPTO("sha256");
 
 #include "crop_devid.c"
diff --git a/arch/sparc/crypto/sha512_glue.c b/arch/sparc/crypto/sha512_glue.c
index f04d1994d19aa3acfc9286a82265a4a031b8ea5f..11eb36c3fc8c4a8ee6559914895474b44fd6ca89 100644
--- a/arch/sparc/crypto/sha512_glue.c
+++ b/arch/sparc/crypto/sha512_glue.c
@@ -139,7 +139,7 @@ static int sha384_sparc64_final(struct shash_desc *desc, u8 *hash)
 	sha512_sparc64_final(desc, D);
 
 	memcpy(hash, D, 48);
-	memset(D, 0, 64);
+	memzero_explicit(D, 64);
 
 	return 0;
 }
@@ -222,7 +222,7 @@ module_exit(sha512_sparc64_mod_fini);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("SHA-384 and SHA-512 Secure Hash Algorithm, sparc64 sha512 opcode accelerated");
 
-MODULE_ALIAS("sha384");
-MODULE_ALIAS("sha512");
+MODULE_ALIAS_CRYPTO("sha384");
+MODULE_ALIAS_CRYPTO("sha512");
 
 #include "crop_devid.c"
diff --git a/arch/x86/crypto/aes_glue.c b/arch/x86/crypto/aes_glue.c
index aafe8ce0d65dd4f68b04dab1fecbf33038b4031e..e26984f7ab8d2fa838168be1893c5eacdc375d33 100644
--- a/arch/x86/crypto/aes_glue.c
+++ b/arch/x86/crypto/aes_glue.c
@@ -66,5 +66,5 @@ module_exit(aes_fini);
 
 MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, asm optimized");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("aes");
-MODULE_ALIAS("aes-asm");
+MODULE_ALIAS_CRYPTO("aes");
+MODULE_ALIAS_CRYPTO("aes-asm");
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 888950f29fd90f09574db838251725c49e28934a..ae855f4f64b7755410701753f79608d0408880f9 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -43,10 +43,6 @@
 #include <asm/crypto/glue_helper.h>
 #endif
 
-#if defined(CONFIG_CRYPTO_PCBC) || defined(CONFIG_CRYPTO_PCBC_MODULE)
-#define HAS_PCBC
-#endif
-
 /* This data is stored at the end of the crypto_tfm struct.
  * It's a type of per "session" data storage location.
  * This needs to be 16 byte aligned.
@@ -547,7 +543,7 @@ static int ablk_ctr_init(struct crypto_tfm *tfm)
 
 #endif
 
-#ifdef HAS_PCBC
+#if IS_ENABLED(CONFIG_CRYPTO_PCBC)
 static int ablk_pcbc_init(struct crypto_tfm *tfm)
 {
 	return ablk_init_common(tfm, "fpu(pcbc(__driver-aes-aesni))");
@@ -1377,7 +1373,7 @@ static struct crypto_alg aesni_algs[] = { {
 		},
 	},
 #endif
-#ifdef HAS_PCBC
+#if IS_ENABLED(CONFIG_CRYPTO_PCBC)
 }, {
 	.cra_name		= "pcbc(aes)",
 	.cra_driver_name	= "pcbc-aes-aesni",
@@ -1550,4 +1546,4 @@ module_exit(aesni_exit);
 
 MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("aes");
+MODULE_ALIAS_CRYPTO("aes");
diff --git a/arch/x86/crypto/blowfish_glue.c b/arch/x86/crypto/blowfish_glue.c
index 8af519ed73d122e316a0a6f1f5a410b7fe432d09..17c05531dfd1752234eb9b6835f848b12ed07817 100644
--- a/arch/x86/crypto/blowfish_glue.c
+++ b/arch/x86/crypto/blowfish_glue.c
@@ -478,5 +478,5 @@ module_exit(fini);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Blowfish Cipher Algorithm, asm optimized");
-MODULE_ALIAS("blowfish");
-MODULE_ALIAS("blowfish-asm");
+MODULE_ALIAS_CRYPTO("blowfish");
+MODULE_ALIAS_CRYPTO("blowfish-asm");
diff --git a/arch/x86/crypto/camellia_aesni_avx2_glue.c b/arch/x86/crypto/camellia_aesni_avx2_glue.c
index 4209a76fcdaad4225fb9c15fb954dfb5ad495c74..9a07fafe3831394c86da42758d3a1407628f03ab 100644
--- a/arch/x86/crypto/camellia_aesni_avx2_glue.c
+++ b/arch/x86/crypto/camellia_aesni_avx2_glue.c
@@ -582,5 +582,5 @@ module_exit(camellia_aesni_fini);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Camellia Cipher Algorithm, AES-NI/AVX2 optimized");
-MODULE_ALIAS("camellia");
-MODULE_ALIAS("camellia-asm");
+MODULE_ALIAS_CRYPTO("camellia");
+MODULE_ALIAS_CRYPTO("camellia-asm");
diff --git a/arch/x86/crypto/camellia_aesni_avx_glue.c b/arch/x86/crypto/camellia_aesni_avx_glue.c
index 87a041a10f4ac1fe7cceccc82eb24fcb03f92fa4..ed38d959add6a8aa09f3e32285de6372eea3a7e8 100644
--- a/arch/x86/crypto/camellia_aesni_avx_glue.c
+++ b/arch/x86/crypto/camellia_aesni_avx_glue.c
@@ -574,5 +574,5 @@ module_exit(camellia_aesni_fini);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Camellia Cipher Algorithm, AES-NI/AVX optimized");
-MODULE_ALIAS("camellia");
-MODULE_ALIAS("camellia-asm");
+MODULE_ALIAS_CRYPTO("camellia");
+MODULE_ALIAS_CRYPTO("camellia-asm");
diff --git a/arch/x86/crypto/camellia_glue.c b/arch/x86/crypto/camellia_glue.c
index c171dcbf192d9b2ae1aa31c5b91f02ffb0b9220d..5c8b6266a394b45d4317d91b05adb6cc12b15219 100644
--- a/arch/x86/crypto/camellia_glue.c
+++ b/arch/x86/crypto/camellia_glue.c
@@ -1725,5 +1725,5 @@ module_exit(fini);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Camellia Cipher Algorithm, asm optimized");
-MODULE_ALIAS("camellia");
-MODULE_ALIAS("camellia-asm");
+MODULE_ALIAS_CRYPTO("camellia");
+MODULE_ALIAS_CRYPTO("camellia-asm");
diff --git a/arch/x86/crypto/cast5_avx_glue.c b/arch/x86/crypto/cast5_avx_glue.c
index e57e20ab5e0bc49ffa098ec9e86ab396ee2b71ea..60ada677a92874e7d24fe00f1f309ce9c9a9a8bf 100644
--- a/arch/x86/crypto/cast5_avx_glue.c
+++ b/arch/x86/crypto/cast5_avx_glue.c
@@ -491,4 +491,4 @@ module_exit(cast5_exit);
 
 MODULE_DESCRIPTION("Cast5 Cipher Algorithm, AVX optimized");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("cast5");
+MODULE_ALIAS_CRYPTO("cast5");
diff --git a/arch/x86/crypto/cast6_avx_glue.c b/arch/x86/crypto/cast6_avx_glue.c
index 09f3677393e4b888895c83bae05d114df0d5184c..0160f68a57ff33113c1a1b030d7919b56d86669e 100644
--- a/arch/x86/crypto/cast6_avx_glue.c
+++ b/arch/x86/crypto/cast6_avx_glue.c
@@ -611,4 +611,4 @@ module_exit(cast6_exit);
 
 MODULE_DESCRIPTION("Cast6 Cipher Algorithm, AVX optimized");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("cast6");
+MODULE_ALIAS_CRYPTO("cast6");
diff --git a/arch/x86/crypto/crc32-pclmul_glue.c b/arch/x86/crypto/crc32-pclmul_glue.c
index 9d014a74ef969ab91b2d2c61854052f0834c1c3b..1937fc1d876338aa0aa9bb5fddea9e0aa3541707 100644
--- a/arch/x86/crypto/crc32-pclmul_glue.c
+++ b/arch/x86/crypto/crc32-pclmul_glue.c
@@ -197,5 +197,5 @@ module_exit(crc32_pclmul_mod_fini);
 MODULE_AUTHOR("Alexander Boyko <alexander_boyko@xyratex.com>");
 MODULE_LICENSE("GPL");
 
-MODULE_ALIAS("crc32");
-MODULE_ALIAS("crc32-pclmul");
+MODULE_ALIAS_CRYPTO("crc32");
+MODULE_ALIAS_CRYPTO("crc32-pclmul");
diff --git a/arch/x86/crypto/crc32c-intel_glue.c b/arch/x86/crypto/crc32c-intel_glue.c
index 6812ad98355c3d0e93be5e07c47e5e3956ebc8a4..28640c3d6af7f6172a8fe39d4553c98019614e24 100644
--- a/arch/x86/crypto/crc32c-intel_glue.c
+++ b/arch/x86/crypto/crc32c-intel_glue.c
@@ -280,5 +280,5 @@ MODULE_AUTHOR("Austin Zhang <austin.zhang@intel.com>, Kent Liu <kent.liu@intel.c
 MODULE_DESCRIPTION("CRC32c (Castagnoli) optimization using Intel Hardware.");
 MODULE_LICENSE("GPL");
 
-MODULE_ALIAS("crc32c");
-MODULE_ALIAS("crc32c-intel");
+MODULE_ALIAS_CRYPTO("crc32c");
+MODULE_ALIAS_CRYPTO("crc32c-intel");
diff --git a/arch/x86/crypto/crct10dif-pclmul_glue.c b/arch/x86/crypto/crct10dif-pclmul_glue.c
index 7845d7fd54c0aa56232ae6b17dd433e26c879bd4..b6c67bf30fdf6704f6d83b093ee73ae7d9b77fcf 100644
--- a/arch/x86/crypto/crct10dif-pclmul_glue.c
+++ b/arch/x86/crypto/crct10dif-pclmul_glue.c
@@ -147,5 +147,5 @@ MODULE_AUTHOR("Tim Chen <tim.c.chen@linux.intel.com>");
 MODULE_DESCRIPTION("T10 DIF CRC calculation accelerated with PCLMULQDQ.");
 MODULE_LICENSE("GPL");
 
-MODULE_ALIAS("crct10dif");
-MODULE_ALIAS("crct10dif-pclmul");
+MODULE_ALIAS_CRYPTO("crct10dif");
+MODULE_ALIAS_CRYPTO("crct10dif-pclmul");
diff --git a/arch/x86/crypto/des3_ede_glue.c b/arch/x86/crypto/des3_ede_glue.c
index 0e9c0668fe4ec79d030e4c4441b0dde70c2efa36..38a14f818ef13f27cf7a7bb4b1de4064fbedf3f4 100644
--- a/arch/x86/crypto/des3_ede_glue.c
+++ b/arch/x86/crypto/des3_ede_glue.c
@@ -502,8 +502,8 @@ module_exit(des3_ede_x86_fini);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Triple DES EDE Cipher Algorithm, asm optimized");
-MODULE_ALIAS("des3_ede");
-MODULE_ALIAS("des3_ede-asm");
-MODULE_ALIAS("des");
-MODULE_ALIAS("des-asm");
+MODULE_ALIAS_CRYPTO("des3_ede");
+MODULE_ALIAS_CRYPTO("des3_ede-asm");
+MODULE_ALIAS_CRYPTO("des");
+MODULE_ALIAS_CRYPTO("des-asm");
 MODULE_AUTHOR("Jussi Kivilinna <jussi.kivilinna@iki.fi>");
diff --git a/arch/x86/crypto/fpu.c b/arch/x86/crypto/fpu.c
index 98d7a188f46b0744ea53942eeab09d609f7d4847..f368ba261739fa09be28bc02fe34cf3112099fa8 100644
--- a/arch/x86/crypto/fpu.c
+++ b/arch/x86/crypto/fpu.c
@@ -17,6 +17,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <linux/crypto.h>
 #include <asm/i387.h>
 
 struct crypto_fpu_ctx {
@@ -159,3 +160,5 @@ void __exit crypto_fpu_exit(void)
 {
 	crypto_unregister_template(&crypto_fpu_tmpl);
 }
+
+MODULE_ALIAS_CRYPTO("fpu");
diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c
index 88bb7ba8b1753e56b59c5a755e3cde4fefe5e78e..8253d85aa16508f19249e1f3cee91356c565d1e3 100644
--- a/arch/x86/crypto/ghash-clmulni-intel_glue.c
+++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c
@@ -341,4 +341,4 @@ module_exit(ghash_pclmulqdqni_mod_exit);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("GHASH Message Digest Algorithm, "
 		   "acclerated by PCLMULQDQ-NI");
-MODULE_ALIAS("ghash");
+MODULE_ALIAS_CRYPTO("ghash");
diff --git a/arch/x86/crypto/salsa20_glue.c b/arch/x86/crypto/salsa20_glue.c
index 5e8e67739bb50a97c690139ff18517a2918d0386..399a29d067d6367603714633fb8c4de6ab77275a 100644
--- a/arch/x86/crypto/salsa20_glue.c
+++ b/arch/x86/crypto/salsa20_glue.c
@@ -119,5 +119,5 @@ module_exit(fini);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION ("Salsa20 stream cipher algorithm (optimized assembly version)");
-MODULE_ALIAS("salsa20");
-MODULE_ALIAS("salsa20-asm");
+MODULE_ALIAS_CRYPTO("salsa20");
+MODULE_ALIAS_CRYPTO("salsa20-asm");
diff --git a/arch/x86/crypto/serpent_avx2_glue.c b/arch/x86/crypto/serpent_avx2_glue.c
index 2fae489b15246525991e6e606b8e01923e298263..437e47a4d302f584dfbeb3fef6ab76fc6e39008a 100644
--- a/arch/x86/crypto/serpent_avx2_glue.c
+++ b/arch/x86/crypto/serpent_avx2_glue.c
@@ -558,5 +558,5 @@ module_exit(fini);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Serpent Cipher Algorithm, AVX2 optimized");
-MODULE_ALIAS("serpent");
-MODULE_ALIAS("serpent-asm");
+MODULE_ALIAS_CRYPTO("serpent");
+MODULE_ALIAS_CRYPTO("serpent-asm");
diff --git a/arch/x86/crypto/serpent_avx_glue.c b/arch/x86/crypto/serpent_avx_glue.c
index ff487087097254f8368d035db08d02472dc9b76e..7e217398b4eb1d4656f59b1a90f707bd6028fe94 100644
--- a/arch/x86/crypto/serpent_avx_glue.c
+++ b/arch/x86/crypto/serpent_avx_glue.c
@@ -617,4 +617,4 @@ module_exit(serpent_exit);
 
 MODULE_DESCRIPTION("Serpent Cipher Algorithm, AVX optimized");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("serpent");
+MODULE_ALIAS_CRYPTO("serpent");
diff --git a/arch/x86/crypto/serpent_sse2_glue.c b/arch/x86/crypto/serpent_sse2_glue.c
index 8c95f86373061680f4d8f4418d4da77f9123c111..bf025adaea01bba090a09867490bd960a7674290 100644
--- a/arch/x86/crypto/serpent_sse2_glue.c
+++ b/arch/x86/crypto/serpent_sse2_glue.c
@@ -618,4 +618,4 @@ module_exit(serpent_sse2_exit);
 
 MODULE_DESCRIPTION("Serpent Cipher Algorithm, SSE2 optimized");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("serpent");
+MODULE_ALIAS_CRYPTO("serpent");
diff --git a/arch/x86/crypto/sha-mb/sha1_mb.c b/arch/x86/crypto/sha-mb/sha1_mb.c
index 99eefd812958464bac221d704ac3aee8766b1c3a..a225a5ca1037ede0a9fa8ebb109a43c22523ffaa 100644
--- a/arch/x86/crypto/sha-mb/sha1_mb.c
+++ b/arch/x86/crypto/sha-mb/sha1_mb.c
@@ -204,8 +204,7 @@ static struct sha1_hash_ctx *sha1_ctx_mgr_resubmit(struct sha1_ctx_mgr *mgr, str
 			continue;
 		}
 
-		if (ctx)
-			ctx->status = HASH_CTX_STS_IDLE;
+		ctx->status = HASH_CTX_STS_IDLE;
 		return ctx;
 	}
 
diff --git a/arch/x86/crypto/sha1_ssse3_glue.c b/arch/x86/crypto/sha1_ssse3_glue.c
index 74d16ef707c790ae1837d9ea513d63b0499bb9ef..6c20fe04a738df08e5428fb714e7c93770fcf748 100644
--- a/arch/x86/crypto/sha1_ssse3_glue.c
+++ b/arch/x86/crypto/sha1_ssse3_glue.c
@@ -278,4 +278,4 @@ module_exit(sha1_ssse3_mod_fini);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, Supplemental SSE3 accelerated");
 
-MODULE_ALIAS("sha1");
+MODULE_ALIAS_CRYPTO("sha1");
diff --git a/arch/x86/crypto/sha256_ssse3_glue.c b/arch/x86/crypto/sha256_ssse3_glue.c
index f248546da1caa956014dfd9648814ad37f2ca8d3..8fad72f4dfd2e5b741cea2c059ee4605d1a28ea1 100644
--- a/arch/x86/crypto/sha256_ssse3_glue.c
+++ b/arch/x86/crypto/sha256_ssse3_glue.c
@@ -211,7 +211,7 @@ static int sha224_ssse3_final(struct shash_desc *desc, u8 *hash)
 	sha256_ssse3_final(desc, D);
 
 	memcpy(hash, D, SHA224_DIGEST_SIZE);
-	memset(D, 0, SHA256_DIGEST_SIZE);
+	memzero_explicit(D, SHA256_DIGEST_SIZE);
 
 	return 0;
 }
@@ -318,5 +318,5 @@ module_exit(sha256_ssse3_mod_fini);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm, Supplemental SSE3 accelerated");
 
-MODULE_ALIAS("sha256");
-MODULE_ALIAS("sha224");
+MODULE_ALIAS_CRYPTO("sha256");
+MODULE_ALIAS_CRYPTO("sha224");
diff --git a/arch/x86/crypto/sha512_ssse3_glue.c b/arch/x86/crypto/sha512_ssse3_glue.c
index 8626b03e83b75e22d45247531a7e3f72392cbaf9..0b6af26832bf4e7ece867a6bbbadd474c2420386 100644
--- a/arch/x86/crypto/sha512_ssse3_glue.c
+++ b/arch/x86/crypto/sha512_ssse3_glue.c
@@ -219,7 +219,7 @@ static int sha384_ssse3_final(struct shash_desc *desc, u8 *hash)
 	sha512_ssse3_final(desc, D);
 
 	memcpy(hash, D, SHA384_DIGEST_SIZE);
-	memset(D, 0, SHA512_DIGEST_SIZE);
+	memzero_explicit(D, SHA512_DIGEST_SIZE);
 
 	return 0;
 }
@@ -326,5 +326,5 @@ module_exit(sha512_ssse3_mod_fini);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("SHA512 Secure Hash Algorithm, Supplemental SSE3 accelerated");
 
-MODULE_ALIAS("sha512");
-MODULE_ALIAS("sha384");
+MODULE_ALIAS_CRYPTO("sha512");
+MODULE_ALIAS_CRYPTO("sha384");
diff --git a/arch/x86/crypto/twofish_avx_glue.c b/arch/x86/crypto/twofish_avx_glue.c
index 4e3c665be1296f16cedde6f402249316b75bc4ed..1ac531ea9bccca4d4eca5af299e54ff2bd6aa514 100644
--- a/arch/x86/crypto/twofish_avx_glue.c
+++ b/arch/x86/crypto/twofish_avx_glue.c
@@ -579,4 +579,4 @@ module_exit(twofish_exit);
 
 MODULE_DESCRIPTION("Twofish Cipher Algorithm, AVX optimized");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("twofish");
+MODULE_ALIAS_CRYPTO("twofish");
diff --git a/arch/x86/crypto/twofish_glue.c b/arch/x86/crypto/twofish_glue.c
index 0a5202303501e29215e5f5bb9e143417ddd35452..77e06c2da83d0ec5a9e468759727c0c25baca5b5 100644
--- a/arch/x86/crypto/twofish_glue.c
+++ b/arch/x86/crypto/twofish_glue.c
@@ -96,5 +96,5 @@ module_exit(fini);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION ("Twofish Cipher Algorithm, asm optimized");
-MODULE_ALIAS("twofish");
-MODULE_ALIAS("twofish-asm");
+MODULE_ALIAS_CRYPTO("twofish");
+MODULE_ALIAS_CRYPTO("twofish-asm");
diff --git a/arch/x86/crypto/twofish_glue_3way.c b/arch/x86/crypto/twofish_glue_3way.c
index 13e63b3e1dfb44593ea2274a63adebfbdce7e6ce..56d8a08ee47908d06c80975428655629b5749c24 100644
--- a/arch/x86/crypto/twofish_glue_3way.c
+++ b/arch/x86/crypto/twofish_glue_3way.c
@@ -495,5 +495,5 @@ module_exit(fini);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Twofish Cipher Algorithm, 3-way parallel asm optimized");
-MODULE_ALIAS("twofish");
-MODULE_ALIAS("twofish-asm");
+MODULE_ALIAS_CRYPTO("twofish");
+MODULE_ALIAS_CRYPTO("twofish-asm");
diff --git a/crypto/842.c b/crypto/842.c
index 65c7a89cfa090a1a3a7bff6ad2f1ab9d40e1701f..b48f4f108c474104d484ceb316099ef78b20f5d3 100644
--- a/crypto/842.c
+++ b/crypto/842.c
@@ -180,3 +180,4 @@ module_exit(nx842_mod_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("842 Compression Algorithm");
+MODULE_ALIAS_CRYPTO("842");
diff --git a/crypto/aes_generic.c b/crypto/aes_generic.c
index fd0d6b454975c4dba37c5d4cbaff179110351431..9b3c54c1cbe826a8cb031a9affb9079f0961d1c4 100644
--- a/crypto/aes_generic.c
+++ b/crypto/aes_generic.c
@@ -1474,4 +1474,4 @@ module_exit(aes_fini);
 
 MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
 MODULE_LICENSE("Dual BSD/GPL");
-MODULE_ALIAS("aes");
+MODULE_ALIAS_CRYPTO("aes");
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index bc21f520d489d0d3d445269ff282faf1b80c1905..1fa7bc31be63b9fb774782ebe3fbdaed0b16ff72 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -421,6 +421,12 @@ int af_alg_cmsg_send(struct msghdr *msg, struct af_alg_control *con)
 			con->op = *(u32 *)CMSG_DATA(cmsg);
 			break;
 
+		case ALG_SET_AEAD_ASSOCLEN:
+			if (cmsg->cmsg_len < CMSG_LEN(sizeof(u32)))
+				return -EINVAL;
+			con->aead_assoclen = *(u32 *)CMSG_DATA(cmsg);
+			break;
+
 		default:
 			return -EINVAL;
 		}
diff --git a/crypto/algapi.c b/crypto/algapi.c
index e8d3a7dca8c4887f0df7954baee7e77637b0cfce..71a8143e23b13390ec020ffb7f1168ae9ca285fc 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -509,8 +509,8 @@ static struct crypto_template *__crypto_lookup_template(const char *name)
 
 struct crypto_template *crypto_lookup_template(const char *name)
 {
-	return try_then_request_module(__crypto_lookup_template(name), "%s",
-				       name);
+	return try_then_request_module(__crypto_lookup_template(name),
+				       "crypto-%s", name);
 }
 EXPORT_SYMBOL_GPL(crypto_lookup_template);
 
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index 83cd2cc49c9f2647755282a2ba1cf4b5f2e122b9..01f56eb7816ee4320a9acd9632c3e4b5bc3233ee 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -258,8 +258,8 @@ static void hash_sock_destruct(struct sock *sk)
 	struct alg_sock *ask = alg_sk(sk);
 	struct hash_ctx *ctx = ask->private;
 
-	sock_kfree_s(sk, ctx->result,
-		     crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req)));
+	sock_kzfree_s(sk, ctx->result,
+		      crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req)));
 	sock_kfree_s(sk, ctx, ctx->len);
 	af_alg_release_parent(sk);
 }
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index 4f45dab24648f267ae562dc2a6d266475ec11a14..c12207c8dde9e6b6a5365783f299623e7a221914 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -251,6 +251,7 @@ static int skcipher_sendmsg(struct kiocb *unused, struct socket *sock,
 	struct af_alg_control con = {};
 	long copied = 0;
 	bool enc = 0;
+	bool init = 0;
 	int err;
 	int i;
 
@@ -259,6 +260,7 @@ static int skcipher_sendmsg(struct kiocb *unused, struct socket *sock,
 		if (err)
 			return err;
 
+		init = 1;
 		switch (con.op) {
 		case ALG_OP_ENCRYPT:
 			enc = 1;
@@ -280,7 +282,7 @@ static int skcipher_sendmsg(struct kiocb *unused, struct socket *sock,
 	if (!ctx->more && ctx->used)
 		goto unlock;
 
-	if (!ctx->used) {
+	if (init) {
 		ctx->enc = enc;
 		if (con.iv)
 			memcpy(ctx->iv, con.iv->iv, ivsize);
@@ -359,8 +361,6 @@ static int skcipher_sendmsg(struct kiocb *unused, struct socket *sock,
 	err = 0;
 
 	ctx->more = msg->msg_flags & MSG_MORE;
-	if (!ctx->more && !list_empty(&ctx->tsgl))
-		sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
 
 unlock:
 	skcipher_data_wakeup(sk);
@@ -408,8 +408,6 @@ static ssize_t skcipher_sendpage(struct socket *sock, struct page *page,
 
 done:
 	ctx->more = flags & MSG_MORE;
-	if (!ctx->more && !list_empty(&ctx->tsgl))
-		sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
 
 unlock:
 	skcipher_data_wakeup(sk);
@@ -448,14 +446,13 @@ static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock,
 			while (!sg->length)
 				sg++;
 
-			used = ctx->used;
-			if (!used) {
+			if (!ctx->used) {
 				err = skcipher_wait_for_data(sk, flags);
 				if (err)
 					goto unlock;
 			}
 
-			used = min_t(unsigned long, used, seglen);
+			used = min_t(unsigned long, ctx->used, seglen);
 
 			used = af_alg_make_sg(&ctx->rsgl, from, used, 1);
 			err = used;
@@ -566,7 +563,7 @@ static void skcipher_sock_destruct(struct sock *sk)
 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(&ctx->req);
 
 	skcipher_free_sgl(sk);
-	sock_kfree_s(sk, ctx->iv, crypto_ablkcipher_ivsize(tfm));
+	sock_kzfree_s(sk, ctx->iv, crypto_ablkcipher_ivsize(tfm));
 	sock_kfree_s(sk, ctx, ctx->len);
 	af_alg_release_parent(sk);
 }
diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c
index 666f1962a160f5d547579b918b6229de0232607b..b4485a108389a2f13b0ca28949e4f6b932818277 100644
--- a/crypto/ansi_cprng.c
+++ b/crypto/ansi_cprng.c
@@ -476,4 +476,4 @@ module_param(dbg, int, 0);
 MODULE_PARM_DESC(dbg, "Boolean to enable debugging (0/1 == off/on)");
 module_init(prng_mod_init);
 module_exit(prng_mod_fini);
-MODULE_ALIAS("stdrng");
+MODULE_ALIAS_CRYPTO("stdrng");
diff --git a/crypto/anubis.c b/crypto/anubis.c
index 008c8a4fb67ca77de02bc1c0359b4ef77c2b452d..4bb187c2a9027bab28e82370bd54f128602a25f1 100644
--- a/crypto/anubis.c
+++ b/crypto/anubis.c
@@ -704,3 +704,4 @@ module_exit(anubis_mod_fini);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Anubis Cryptographic Algorithm");
+MODULE_ALIAS_CRYPTO("anubis");
diff --git a/crypto/api.c b/crypto/api.c
index a2b39c5f3649de2c513ecbec7fd45c2fd6bbdaf4..2a81e98a0021074b9ce8e2ac54eb876c1dcf7607 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -216,11 +216,11 @@ struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask)
 
 	alg = crypto_alg_lookup(name, type, mask);
 	if (!alg) {
-		request_module("%s", name);
+		request_module("crypto-%s", name);
 
 		if (!((type ^ CRYPTO_ALG_NEED_FALLBACK) & mask &
 		      CRYPTO_ALG_NEED_FALLBACK))
-			request_module("%s-all", name);
+			request_module("crypto-%s-all", name);
 
 		alg = crypto_alg_lookup(name, type, mask);
 	}
diff --git a/crypto/arc4.c b/crypto/arc4.c
index 5a772c3657d58d55c5bb453dbd6cc73a2e2a7bea..f1a81925558fa196650e3973daa44315caa613b0 100644
--- a/crypto/arc4.c
+++ b/crypto/arc4.c
@@ -166,3 +166,4 @@ module_exit(arc4_exit);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("ARC4 Cipher Algorithm");
 MODULE_AUTHOR("Jon Oberheide <jon@oberheide.org>");
+MODULE_ALIAS_CRYPTO("arc4");
diff --git a/crypto/authenc.c b/crypto/authenc.c
index e1223559d5dfd233cbc15184c9dc607fb34fae2e..78fb16cab13f9660553a0705941608ec3a59d88a 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -721,3 +721,4 @@ module_exit(crypto_authenc_module_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Simple AEAD wrapper for IPsec");
+MODULE_ALIAS_CRYPTO("authenc");
diff --git a/crypto/authencesn.c b/crypto/authencesn.c
index 4be0dd4373a9a2e8a15b62adbf8dbdd6e374eacd..024bff2344fcff9d0ae3af5c04b4a75280dd2d4a 100644
--- a/crypto/authencesn.c
+++ b/crypto/authencesn.c
@@ -814,3 +814,4 @@ module_exit(crypto_authenc_esn_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
 MODULE_DESCRIPTION("AEAD wrapper for IPsec with extended sequence numbers");
+MODULE_ALIAS_CRYPTO("authencesn");
diff --git a/crypto/blowfish_generic.c b/crypto/blowfish_generic.c
index 8baf5447d35b58f70bc43fa1bfb6f3de896813ae..7bd71f02d0dde233939716f3b0059cc758ab788c 100644
--- a/crypto/blowfish_generic.c
+++ b/crypto/blowfish_generic.c
@@ -138,4 +138,4 @@ module_exit(blowfish_mod_fini);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Blowfish Cipher Algorithm");
-MODULE_ALIAS("blowfish");
+MODULE_ALIAS_CRYPTO("blowfish");
diff --git a/crypto/camellia_generic.c b/crypto/camellia_generic.c
index 26bcd7a2d6b473a2b2d1b12edd51b271705a46f0..1b74c5a3e8910741cac8c92e292b041eff40e714 100644
--- a/crypto/camellia_generic.c
+++ b/crypto/camellia_generic.c
@@ -1098,4 +1098,4 @@ module_exit(camellia_fini);
 
 MODULE_DESCRIPTION("Camellia Cipher Algorithm");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("camellia");
+MODULE_ALIAS_CRYPTO("camellia");
diff --git a/crypto/cast5_generic.c b/crypto/cast5_generic.c
index 5558f630a0ebd254462cf6fc72486ea8d00bccd0..84c86db67ec7a88a85fd92a93ad07af6eb935564 100644
--- a/crypto/cast5_generic.c
+++ b/crypto/cast5_generic.c
@@ -549,4 +549,4 @@ module_exit(cast5_mod_fini);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Cast5 Cipher Algorithm");
-MODULE_ALIAS("cast5");
+MODULE_ALIAS_CRYPTO("cast5");
diff --git a/crypto/cast6_generic.c b/crypto/cast6_generic.c
index de732528a43042b0445dba87145653b0ce4f313a..f408f0bd8de2525ac369ae68c4bd5a5187b22e1d 100644
--- a/crypto/cast6_generic.c
+++ b/crypto/cast6_generic.c
@@ -291,4 +291,4 @@ module_exit(cast6_mod_fini);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Cast6 Cipher Algorithm");
-MODULE_ALIAS("cast6");
+MODULE_ALIAS_CRYPTO("cast6");
diff --git a/crypto/cbc.c b/crypto/cbc.c
index 61ac42e1e32bb75816c0c1b0a7dd614cd80f4474..780ee27b2d43d5f3620d32c4df6c00670b6c48c8 100644
--- a/crypto/cbc.c
+++ b/crypto/cbc.c
@@ -289,3 +289,4 @@ module_exit(crypto_cbc_module_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("CBC block cipher algorithm");
+MODULE_ALIAS_CRYPTO("cbc");
diff --git a/crypto/ccm.c b/crypto/ccm.c
index 1df84217f7c9320dbfc5174f91ce394667c36447..003bbbd21a2ba9a24f4207f20037d5ee75593726 100644
--- a/crypto/ccm.c
+++ b/crypto/ccm.c
@@ -879,5 +879,6 @@ module_exit(crypto_ccm_module_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Counter with CBC MAC");
-MODULE_ALIAS("ccm_base");
-MODULE_ALIAS("rfc4309");
+MODULE_ALIAS_CRYPTO("ccm_base");
+MODULE_ALIAS_CRYPTO("rfc4309");
+MODULE_ALIAS_CRYPTO("ccm");
diff --git a/crypto/chainiv.c b/crypto/chainiv.c
index 9c294c8f9a07888a7d3f5a9210da334c26218f77..63c17d5992f79b5a5ed40a526fa58a532eb3e5dc 100644
--- a/crypto/chainiv.c
+++ b/crypto/chainiv.c
@@ -359,3 +359,4 @@ module_exit(chainiv_module_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Chain IV Generator");
+MODULE_ALIAS_CRYPTO("chainiv");
diff --git a/crypto/cmac.c b/crypto/cmac.c
index 50880cf17fad702f9251ac7af4839842fe0ae8d5..7a8bfbd548f60835fbf417ab96e43b749a5d283f 100644
--- a/crypto/cmac.c
+++ b/crypto/cmac.c
@@ -313,3 +313,4 @@ module_exit(crypto_cmac_module_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("CMAC keyed hash algorithm");
+MODULE_ALIAS_CRYPTO("cmac");
diff --git a/crypto/crc32.c b/crypto/crc32.c
index 9d1c41569898a96e05b418344545ae6d01e8d00e..187ded28cb0bd76825475dfd3b4684d8043de752 100644
--- a/crypto/crc32.c
+++ b/crypto/crc32.c
@@ -156,3 +156,4 @@ module_exit(crc32_mod_fini);
 MODULE_AUTHOR("Alexander Boyko <alexander_boyko@xyratex.com>");
 MODULE_DESCRIPTION("CRC32 calculations wrapper for lib/crc32");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS_CRYPTO("crc32");
diff --git a/crypto/crc32c_generic.c b/crypto/crc32c_generic.c
index d9c7beba8e502b8006af530c46ff33c75a5d97c1..2a062025749d925f858939933ebe67283f158562 100644
--- a/crypto/crc32c_generic.c
+++ b/crypto/crc32c_generic.c
@@ -170,5 +170,5 @@ module_exit(crc32c_mod_fini);
 MODULE_AUTHOR("Clay Haapala <chaapala@cisco.com>");
 MODULE_DESCRIPTION("CRC32c (Castagnoli) calculations wrapper for lib/crc32c");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("crc32c");
+MODULE_ALIAS_CRYPTO("crc32c");
 MODULE_SOFTDEP("pre: crc32c");
diff --git a/crypto/crct10dif_generic.c b/crypto/crct10dif_generic.c
index 877e7114ec5cbdfac1f63261e16f50133ccda2d2..08bb4f50452085b65c0ed263a84f5c8298142149 100644
--- a/crypto/crct10dif_generic.c
+++ b/crypto/crct10dif_generic.c
@@ -124,4 +124,4 @@ module_exit(crct10dif_mod_fini);
 MODULE_AUTHOR("Tim Chen <tim.c.chen@linux.intel.com>");
 MODULE_DESCRIPTION("T10 DIF CRC calculation.");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("crct10dif");
+MODULE_ALIAS_CRYPTO("crct10dif");
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index e592c90abebb70f9f7e3966dba9b2dbd38a750d6..650afac10fd78e5ba51a28d7161ee71bcd6f3175 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -955,3 +955,4 @@ module_exit(cryptd_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Software async crypto daemon");
+MODULE_ALIAS_CRYPTO("cryptd");
diff --git a/crypto/crypto_null.c b/crypto/crypto_null.c
index 1dc54bb95a876b838f75b46f64c285edf1e064d8..a20319132e338e7a8e606f9f8d04b3d310741857 100644
--- a/crypto/crypto_null.c
+++ b/crypto/crypto_null.c
@@ -145,9 +145,9 @@ static struct crypto_alg null_algs[3] = { {
 	.coa_decompress		=	null_compress } }
 } };
 
-MODULE_ALIAS("compress_null");
-MODULE_ALIAS("digest_null");
-MODULE_ALIAS("cipher_null");
+MODULE_ALIAS_CRYPTO("compress_null");
+MODULE_ALIAS_CRYPTO("digest_null");
+MODULE_ALIAS_CRYPTO("cipher_null");
 
 static int __init crypto_null_mod_init(void)
 {
diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
index e2a34feec7a4cf80199e651032c70eee8d94460b..c5148a35ae0a38c696f22eaba3c0b860ab8df1c9 100644
--- a/crypto/crypto_user.c
+++ b/crypto/crypto_user.c
@@ -201,10 +201,7 @@ static int crypto_report(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
 	if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
 		return -EINVAL;
 
-	if (!p->cru_driver_name[0])
-		return -EINVAL;
-
-	alg = crypto_alg_match(p, 1);
+	alg = crypto_alg_match(p, 0);
 	if (!alg)
 		return -ENOENT;
 
@@ -537,3 +534,4 @@ module_exit(crypto_user_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
 MODULE_DESCRIPTION("Crypto userspace configuration API");
+MODULE_ALIAS("net-pf-16-proto-21");
diff --git a/crypto/ctr.c b/crypto/ctr.c
index f2b94f27bb2cf9ac2b49ffa88d527f4f8ef98961..2386f731395207a2432d782d10c1421a0577dff3 100644
--- a/crypto/ctr.c
+++ b/crypto/ctr.c
@@ -466,4 +466,5 @@ module_exit(crypto_ctr_module_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("CTR Counter block mode");
-MODULE_ALIAS("rfc3686");
+MODULE_ALIAS_CRYPTO("rfc3686");
+MODULE_ALIAS_CRYPTO("ctr");
diff --git a/crypto/cts.c b/crypto/cts.c
index 133f0874c95eccc7e02c26f834afed08a504a0a9..bd9405820e8ac5ade61d6d6793ec18510d456ad9 100644
--- a/crypto/cts.c
+++ b/crypto/cts.c
@@ -351,3 +351,4 @@ module_exit(crypto_cts_module_exit);
 
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_DESCRIPTION("CTS-CBC CipherText Stealing for CBC");
+MODULE_ALIAS_CRYPTO("cts");
diff --git a/crypto/deflate.c b/crypto/deflate.c
index b57d70eb156b8c424b87d217c59f4700a0d3edca..95d8d37c502183b9e426925928aa5c578523cb7d 100644
--- a/crypto/deflate.c
+++ b/crypto/deflate.c
@@ -222,4 +222,4 @@ module_exit(deflate_mod_fini);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Deflate Compression Algorithm for IPCOMP");
 MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");
-
+MODULE_ALIAS_CRYPTO("deflate");
diff --git a/crypto/des_generic.c b/crypto/des_generic.c
index 298d464ab7d2564cfcce14862e52201a5530df2c..42912948776b1426ec71f2e6fe3d5debd3d67e72 100644
--- a/crypto/des_generic.c
+++ b/crypto/des_generic.c
@@ -983,7 +983,7 @@ static struct crypto_alg des_algs[2] = { {
 	.cia_decrypt		=	des3_ede_decrypt } }
 } };
 
-MODULE_ALIAS("des3_ede");
+MODULE_ALIAS_CRYPTO("des3_ede");
 
 static int __init des_generic_mod_init(void)
 {
diff --git a/crypto/drbg.c b/crypto/drbg.c
index 54cfd4820abc8f321fe1dac7c0212107d16d521d..d748a1d0ca24b5c646953cd6a6cdfa240c4669e0 100644
--- a/crypto/drbg.c
+++ b/crypto/drbg.c
@@ -98,6 +98,7 @@
  */
 
 #include <crypto/drbg.h>
+#include <linux/string.h>
 
 /***************************************************************
  * Backend cipher definitions available to DRBG
@@ -283,38 +284,6 @@ static inline void drbg_cpu_to_be32(__u32 val, unsigned char *buf)
 
 	conversion->conv = cpu_to_be32(val);
 }
-
-/*
- * Increment buffer
- *
- * @dst buffer to increment
- * @add value to add
- */
-static inline void drbg_add_buf(unsigned char *dst, size_t dstlen,
-				const unsigned char *add, size_t addlen)
-{
-	/* implied: dstlen > addlen */
-	unsigned char *dstptr;
-	const unsigned char *addptr;
-	unsigned int remainder = 0;
-	size_t len = addlen;
-
-	dstptr = dst + (dstlen-1);
-	addptr = add + (addlen-1);
-	while (len) {
-		remainder += *dstptr + *addptr;
-		*dstptr = remainder & 0xff;
-		remainder >>= 8;
-		len--; dstptr--; addptr--;
-	}
-	len = dstlen - addlen;
-	while (len && remainder > 0) {
-		remainder = *dstptr + 1;
-		*dstptr = remainder & 0xff;
-		remainder >>= 8;
-		len--; dstptr--;
-	}
-}
 #endif /* defined(CONFIG_CRYPTO_DRBG_HASH) || defined(CONFIG_CRYPTO_DRBG_CTR) */
 
 /******************************************************************
@@ -323,6 +292,13 @@ static inline void drbg_add_buf(unsigned char *dst, size_t dstlen,
 
 #ifdef CONFIG_CRYPTO_DRBG_CTR
 #define CRYPTO_DRBG_CTR_STRING "CTR "
+MODULE_ALIAS_CRYPTO("drbg_pr_ctr_aes256");
+MODULE_ALIAS_CRYPTO("drbg_nopr_ctr_aes256");
+MODULE_ALIAS_CRYPTO("drbg_pr_ctr_aes192");
+MODULE_ALIAS_CRYPTO("drbg_nopr_ctr_aes192");
+MODULE_ALIAS_CRYPTO("drbg_pr_ctr_aes128");
+MODULE_ALIAS_CRYPTO("drbg_nopr_ctr_aes128");
+
 static int drbg_kcapi_sym(struct drbg_state *drbg, const unsigned char *key,
 			  unsigned char *outval, const struct drbg_string *in);
 static int drbg_init_sym_kernel(struct drbg_state *drbg);
@@ -522,9 +498,9 @@ static int drbg_ctr_df(struct drbg_state *drbg,
 	ret = 0;
 
 out:
-	memset(iv, 0, drbg_blocklen(drbg));
-	memset(temp, 0, drbg_statelen(drbg));
-	memset(pad, 0, drbg_blocklen(drbg));
+	memzero_explicit(iv, drbg_blocklen(drbg));
+	memzero_explicit(temp, drbg_statelen(drbg));
+	memzero_explicit(pad, drbg_blocklen(drbg));
 	return ret;
 }
 
@@ -554,7 +530,6 @@ static int drbg_ctr_update(struct drbg_state *drbg, struct list_head *seed,
 	unsigned char *temp_p, *df_data_p; /* pointer to iterate over buffers */
 	unsigned int len = 0;
 	struct drbg_string cipherin;
-	unsigned char prefix = DRBG_PREFIX1;
 
 	memset(temp, 0, drbg_statelen(drbg) + drbg_blocklen(drbg));
 	if (3 > reseed)
@@ -574,7 +549,7 @@ static int drbg_ctr_update(struct drbg_state *drbg, struct list_head *seed,
 	 */
 	while (len < (drbg_statelen(drbg))) {
 		/* 10.2.1.2 step 2.1 */
-		drbg_add_buf(drbg->V, drbg_blocklen(drbg), &prefix, 1);
+		crypto_inc(drbg->V, drbg_blocklen(drbg));
 		/*
 		 * 10.2.1.2 step 2.2 */
 		ret = drbg_kcapi_sym(drbg, drbg->C, temp + len, &cipherin);
@@ -599,9 +574,9 @@ static int drbg_ctr_update(struct drbg_state *drbg, struct list_head *seed,
 	ret = 0;
 
 out:
-	memset(temp, 0, drbg_statelen(drbg) + drbg_blocklen(drbg));
+	memzero_explicit(temp, drbg_statelen(drbg) + drbg_blocklen(drbg));
 	if (2 != reseed)
-		memset(df_data, 0, drbg_statelen(drbg));
+		memzero_explicit(df_data, drbg_statelen(drbg));
 	return ret;
 }
 
@@ -617,7 +592,6 @@ static int drbg_ctr_generate(struct drbg_state *drbg,
 	int len = 0;
 	int ret = 0;
 	struct drbg_string data;
-	unsigned char prefix = DRBG_PREFIX1;
 
 	memset(drbg->scratchpad, 0, drbg_blocklen(drbg));
 
@@ -629,7 +603,7 @@ static int drbg_ctr_generate(struct drbg_state *drbg,
 	}
 
 	/* 10.2.1.5.2 step 4.1 */
-	drbg_add_buf(drbg->V, drbg_blocklen(drbg), &prefix, 1);
+	crypto_inc(drbg->V, drbg_blocklen(drbg));
 	drbg_string_fill(&data, drbg->V, drbg_blocklen(drbg));
 	while (len < buflen) {
 		int outlen = 0;
@@ -643,7 +617,7 @@ static int drbg_ctr_generate(struct drbg_state *drbg,
 			  drbg_blocklen(drbg) : (buflen - len);
 		if (!drbg_fips_continuous_test(drbg, drbg->scratchpad)) {
 			/* 10.2.1.5.2 step 6 */
-			drbg_add_buf(drbg->V, drbg_blocklen(drbg), &prefix, 1);
+			crypto_inc(drbg->V, drbg_blocklen(drbg));
 			continue;
 		}
 		/* 10.2.1.5.2 step 4.3 */
@@ -651,7 +625,7 @@ static int drbg_ctr_generate(struct drbg_state *drbg,
 		len += outlen;
 		/* 10.2.1.5.2 step 6 */
 		if (len < buflen)
-			drbg_add_buf(drbg->V, drbg_blocklen(drbg), &prefix, 1);
+			crypto_inc(drbg->V, drbg_blocklen(drbg));
 	}
 
 	/* 10.2.1.5.2 step 6 */
@@ -660,7 +634,7 @@ static int drbg_ctr_generate(struct drbg_state *drbg,
 		len = ret;
 
 out:
-	memset(drbg->scratchpad, 0, drbg_blocklen(drbg));
+	memzero_explicit(drbg->scratchpad, drbg_blocklen(drbg));
 	return len;
 }
 
@@ -685,6 +659,15 @@ static int drbg_fini_hash_kernel(struct drbg_state *drbg);
 
 #ifdef CONFIG_CRYPTO_DRBG_HMAC
 #define CRYPTO_DRBG_HMAC_STRING "HMAC "
+MODULE_ALIAS_CRYPTO("drbg_pr_hmac_sha512");
+MODULE_ALIAS_CRYPTO("drbg_nopr_hmac_sha512");
+MODULE_ALIAS_CRYPTO("drbg_pr_hmac_sha384");
+MODULE_ALIAS_CRYPTO("drbg_nopr_hmac_sha384");
+MODULE_ALIAS_CRYPTO("drbg_pr_hmac_sha256");
+MODULE_ALIAS_CRYPTO("drbg_nopr_hmac_sha256");
+MODULE_ALIAS_CRYPTO("drbg_pr_hmac_sha1");
+MODULE_ALIAS_CRYPTO("drbg_nopr_hmac_sha1");
+
 /* update function of HMAC DRBG as defined in 10.1.2.2 */
 static int drbg_hmac_update(struct drbg_state *drbg, struct list_head *seed,
 			    int reseed)
@@ -796,6 +779,47 @@ static struct drbg_state_ops drbg_hmac_ops = {
 
 #ifdef CONFIG_CRYPTO_DRBG_HASH
 #define CRYPTO_DRBG_HASH_STRING "HASH "
+MODULE_ALIAS_CRYPTO("drbg_pr_sha512");
+MODULE_ALIAS_CRYPTO("drbg_nopr_sha512");
+MODULE_ALIAS_CRYPTO("drbg_pr_sha384");
+MODULE_ALIAS_CRYPTO("drbg_nopr_sha384");
+MODULE_ALIAS_CRYPTO("drbg_pr_sha256");
+MODULE_ALIAS_CRYPTO("drbg_nopr_sha256");
+MODULE_ALIAS_CRYPTO("drbg_pr_sha1");
+MODULE_ALIAS_CRYPTO("drbg_nopr_sha1");
+
+/*
+ * Increment buffer
+ *
+ * @dst buffer to increment
+ * @add value to add
+ */
+static inline void drbg_add_buf(unsigned char *dst, size_t dstlen,
+				const unsigned char *add, size_t addlen)
+{
+	/* implied: dstlen > addlen */
+	unsigned char *dstptr;
+	const unsigned char *addptr;
+	unsigned int remainder = 0;
+	size_t len = addlen;
+
+	dstptr = dst + (dstlen-1);
+	addptr = add + (addlen-1);
+	while (len) {
+		remainder += *dstptr + *addptr;
+		*dstptr = remainder & 0xff;
+		remainder >>= 8;
+		len--; dstptr--; addptr--;
+	}
+	len = dstlen - addlen;
+	while (len && remainder > 0) {
+		remainder = *dstptr + 1;
+		*dstptr = remainder & 0xff;
+		remainder >>= 8;
+		len--; dstptr--;
+	}
+}
+
 /*
  * scratchpad usage: as drbg_hash_update and drbg_hash_df are used
  * interlinked, the scratchpad is used as follows:
@@ -848,7 +872,7 @@ static int drbg_hash_df(struct drbg_state *drbg,
 	}
 
 out:
-	memset(tmp, 0, drbg_blocklen(drbg));
+	memzero_explicit(tmp, drbg_blocklen(drbg));
 	return ret;
 }
 
@@ -892,7 +916,7 @@ static int drbg_hash_update(struct drbg_state *drbg, struct list_head *seed,
 	ret = drbg_hash_df(drbg, drbg->C, drbg_statelen(drbg), &datalist2);
 
 out:
-	memset(drbg->scratchpad, 0, drbg_statelen(drbg));
+	memzero_explicit(drbg->scratchpad, drbg_statelen(drbg));
 	return ret;
 }
 
@@ -927,7 +951,7 @@ static int drbg_hash_process_addtl(struct drbg_state *drbg,
 		     drbg->scratchpad, drbg_blocklen(drbg));
 
 out:
-	memset(drbg->scratchpad, 0, drbg_blocklen(drbg));
+	memzero_explicit(drbg->scratchpad, drbg_blocklen(drbg));
 	return ret;
 }
 
@@ -942,7 +966,6 @@ static int drbg_hash_hashgen(struct drbg_state *drbg,
 	unsigned char *dst = drbg->scratchpad + drbg_statelen(drbg);
 	struct drbg_string data;
 	LIST_HEAD(datalist);
-	unsigned char prefix = DRBG_PREFIX1;
 
 	memset(src, 0, drbg_statelen(drbg));
 	memset(dst, 0, drbg_blocklen(drbg));
@@ -963,7 +986,7 @@ static int drbg_hash_hashgen(struct drbg_state *drbg,
 		outlen = (drbg_blocklen(drbg) < (buflen - len)) ?
 			  drbg_blocklen(drbg) : (buflen - len);
 		if (!drbg_fips_continuous_test(drbg, dst)) {
-			drbg_add_buf(src, drbg_statelen(drbg), &prefix, 1);
+			crypto_inc(src, drbg_statelen(drbg));
 			continue;
 		}
 		/* 10.1.1.4 step hashgen 4.2 */
@@ -971,11 +994,11 @@ static int drbg_hash_hashgen(struct drbg_state *drbg,
 		len += outlen;
 		/* 10.1.1.4 hashgen step 4.3 */
 		if (len < buflen)
-			drbg_add_buf(src, drbg_statelen(drbg), &prefix, 1);
+			crypto_inc(src, drbg_statelen(drbg));
 	}
 
 out:
-	memset(drbg->scratchpad, 0,
+	memzero_explicit(drbg->scratchpad,
 	       (drbg_statelen(drbg) + drbg_blocklen(drbg)));
 	return len;
 }
@@ -1024,7 +1047,7 @@ static int drbg_hash_generate(struct drbg_state *drbg,
 	drbg_add_buf(drbg->V, drbg_statelen(drbg), u.req, 8);
 
 out:
-	memset(drbg->scratchpad, 0, drbg_blocklen(drbg));
+	memzero_explicit(drbg->scratchpad, drbg_blocklen(drbg));
 	return len;
 }
 
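The drbg_add_buf() helper introduced above performs a byte-wise, big-endian addition with carry propagation, while the simple "+1" cases are now handled by crypto_inc(). Purely as an illustration of that arithmetic, here is a standalone user-space sketch (the be_add() name is hypothetical and this is not part of the patch); crypto_inc(buf, len) behaves like adding a one-byte buffer containing 0x01:

#include <stdio.h>

/* Big-endian add of a short buffer into a longer one, mirroring the
 * carry handling of drbg_add_buf() above (illustration only). */
static void be_add(unsigned char *dst, size_t dstlen,
		   const unsigned char *add, size_t addlen)
{
	unsigned int carry = 0;
	size_t i;

	/* add the two buffers, least significant byte first */
	for (i = 0; i < addlen; i++) {
		carry += dst[dstlen - 1 - i] + add[addlen - 1 - i];
		dst[dstlen - 1 - i] = carry & 0xff;
		carry >>= 8;
	}
	/* propagate any remaining carry into the higher-order bytes */
	for (; i < dstlen && carry; i++) {
		carry += dst[dstlen - 1 - i];
		dst[dstlen - 1 - i] = carry & 0xff;
		carry >>= 8;
	}
}

int main(void)
{
	unsigned char v[4] = { 0x00, 0x00, 0xff, 0xff };
	const unsigned char one = 0x01;

	be_add(v, sizeof(v), &one, 1);	/* v becomes 00 01 00 00 */
	printf("%02x %02x %02x %02x\n", v[0], v[1], v[2], v[3]);
	return 0;
}
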
diff --git a/crypto/ecb.c b/crypto/ecb.c
index 935cfef4aa8479c54cd6490db8a9b97da9125ab5..12011aff097136331f5aca539acb487746efcd69 100644
--- a/crypto/ecb.c
+++ b/crypto/ecb.c
@@ -185,3 +185,4 @@ module_exit(crypto_ecb_module_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("ECB block cipher algorithm");
+MODULE_ALIAS_CRYPTO("ecb");
diff --git a/crypto/eseqiv.c b/crypto/eseqiv.c
index bf7ab4a89493cdb09b5d4affbd255c6d4f718029..f116fae766f81611c5530a6d92313c5e88d23fe1 100644
--- a/crypto/eseqiv.c
+++ b/crypto/eseqiv.c
@@ -267,3 +267,4 @@ module_exit(eseqiv_module_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Encrypted Sequence Number IV Generator");
+MODULE_ALIAS_CRYPTO("eseqiv");
diff --git a/crypto/fcrypt.c b/crypto/fcrypt.c
index 021d7fec6bc89e9b70af87b1e66a310268da28bb..77286ea28865bf4ce5d97f4a556b8aeeed443b14 100644
--- a/crypto/fcrypt.c
+++ b/crypto/fcrypt.c
@@ -420,3 +420,4 @@ module_exit(fcrypt_mod_fini);
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_DESCRIPTION("FCrypt Cipher Algorithm");
 MODULE_AUTHOR("David Howells <dhowells@redhat.com>");
+MODULE_ALIAS_CRYPTO("fcrypt");
diff --git a/crypto/gcm.c b/crypto/gcm.c
index 276cdac567b6ad063cbcdec9113fdb0ed2882c4d..2e403f6138c14bbcb048d59256f0fb40e2032f2d 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -1441,6 +1441,7 @@ module_exit(crypto_gcm_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Galois/Counter Mode");
 MODULE_AUTHOR("Mikko Herranen <mh1@iki.fi>");
-MODULE_ALIAS("gcm_base");
-MODULE_ALIAS("rfc4106");
-MODULE_ALIAS("rfc4543");
+MODULE_ALIAS_CRYPTO("gcm_base");
+MODULE_ALIAS_CRYPTO("rfc4106");
+MODULE_ALIAS_CRYPTO("rfc4543");
+MODULE_ALIAS_CRYPTO("gcm");
diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c
index 9d3f0c69a86ff2caeb33b7cddccbf8a80c7f7926..4e97fae9666f6fd549235ea60c93f999ad00699c 100644
--- a/crypto/ghash-generic.c
+++ b/crypto/ghash-generic.c
@@ -172,4 +172,4 @@ module_exit(ghash_mod_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("GHASH Message Digest Algorithm");
-MODULE_ALIAS("ghash");
+MODULE_ALIAS_CRYPTO("ghash");
diff --git a/crypto/hmac.c b/crypto/hmac.c
index e392219ddc61953054cd834b53a35aa3fc8eb9cd..72e38c098bb3184d3886ac4054f123dca3bb49ac 100644
--- a/crypto/hmac.c
+++ b/crypto/hmac.c
@@ -268,3 +268,4 @@ module_exit(hmac_module_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("HMAC hash algorithm");
+MODULE_ALIAS_CRYPTO("hmac");
diff --git a/crypto/khazad.c b/crypto/khazad.c
index 60e7cd66facc81c0ae051b4c71e3f220144ef6b8..873eb5ded6d7ae2f24e96221a131d5936d4101bd 100644
--- a/crypto/khazad.c
+++ b/crypto/khazad.c
@@ -880,3 +880,4 @@ module_exit(khazad_mod_fini);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Khazad Cryptographic Algorithm");
+MODULE_ALIAS_CRYPTO("khazad");
diff --git a/crypto/krng.c b/crypto/krng.c
index a2d2b72fc135b428102141ab181cc802994d1178..67c88b3312107c7c16e9732fa9ffba38172629f4 100644
--- a/crypto/krng.c
+++ b/crypto/krng.c
@@ -62,4 +62,4 @@ module_exit(krng_mod_fini);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Kernel Random Number Generator");
-MODULE_ALIAS("stdrng");
+MODULE_ALIAS_CRYPTO("stdrng");
diff --git a/crypto/lrw.c b/crypto/lrw.c
index ba42acc4deba8059c65dff053faef591e3175517..6f9908a7ebcbe19f76a4ee306f326f291aff8de9 100644
--- a/crypto/lrw.c
+++ b/crypto/lrw.c
@@ -400,3 +400,4 @@ module_exit(crypto_module_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("LRW block cipher mode");
+MODULE_ALIAS_CRYPTO("lrw");
diff --git a/crypto/lz4.c b/crypto/lz4.c
index 34d072b72a734e14d988aa8b9023ea9069d23079..aefbceaf3104f0df1904dbae4d0c193152f62c07 100644
--- a/crypto/lz4.c
+++ b/crypto/lz4.c
@@ -104,3 +104,4 @@ module_exit(lz4_mod_fini);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("LZ4 Compression Algorithm");
+MODULE_ALIAS_CRYPTO("lz4");
diff --git a/crypto/lz4hc.c b/crypto/lz4hc.c
index 9218b3fed5e377ff085093e56e80f0d498e605fa..a1d3b5bd3d85118c681098382fb2dd00adc1dac8 100644
--- a/crypto/lz4hc.c
+++ b/crypto/lz4hc.c
@@ -104,3 +104,4 @@ module_exit(lz4hc_mod_fini);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("LZ4HC Compression Algorithm");
+MODULE_ALIAS_CRYPTO("lz4hc");
diff --git a/crypto/lzo.c b/crypto/lzo.c
index a8ff2f778dc494ba909b3f4705db6512d296f0e5..4b3e92525dac5a47f3415f46c9eb403813b44275 100644
--- a/crypto/lzo.c
+++ b/crypto/lzo.c
@@ -107,3 +107,4 @@ module_exit(lzo_mod_fini);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("LZO Compression Algorithm");
+MODULE_ALIAS_CRYPTO("lzo");
diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c
index b39fbd53010209e8776d7756244d5bd76aeb96e3..a8e870444ea9cef0ab4a2c6c919724905c7dbde7 100644
--- a/crypto/mcryptd.c
+++ b/crypto/mcryptd.c
@@ -703,3 +703,4 @@ module_exit(mcryptd_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Software async multibuffer crypto daemon");
+MODULE_ALIAS_CRYPTO("mcryptd");
diff --git a/crypto/md4.c b/crypto/md4.c
index 0477a6a01d58258eb07441561cb99640a2c3bd00..3515af425cc917b60669c6dbb1a460f1da1919c4 100644
--- a/crypto/md4.c
+++ b/crypto/md4.c
@@ -255,4 +255,4 @@ module_exit(md4_mod_fini);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("MD4 Message Digest Algorithm");
-
+MODULE_ALIAS_CRYPTO("md4");
diff --git a/crypto/md5.c b/crypto/md5.c
index 7febeaab923bcd724f607ca536ae6ab202e182ea..36f5e5b103f302dbeda611466fb97ab94b87511f 100644
--- a/crypto/md5.c
+++ b/crypto/md5.c
@@ -168,3 +168,4 @@ module_exit(md5_mod_fini);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("MD5 Message Digest Algorithm");
+MODULE_ALIAS_CRYPTO("md5");
diff --git a/crypto/michael_mic.c b/crypto/michael_mic.c
index 079b761bc70d125b241da7d13d30d7cef8b43846..46195e0d0f4d1d30dd20b3bdc8f987ea14176643 100644
--- a/crypto/michael_mic.c
+++ b/crypto/michael_mic.c
@@ -184,3 +184,4 @@ module_exit(michael_mic_exit);
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("Michael MIC");
 MODULE_AUTHOR("Jouni Malinen <j@w1.fi>");
+MODULE_ALIAS_CRYPTO("michael_mic");
diff --git a/crypto/pcbc.c b/crypto/pcbc.c
index d1b8bdfb58551e5b7844ec423f834f468fe9f3f1..f654965f09338dab066795d8c6ab8618ef263d93 100644
--- a/crypto/pcbc.c
+++ b/crypto/pcbc.c
@@ -295,3 +295,4 @@ module_exit(crypto_pcbc_module_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("PCBC block cipher algorithm");
+MODULE_ALIAS_CRYPTO("pcbc");
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
index 309d345ead9582896bde8c5f5c170bf5d0315f66..c305d4112735cd3b7b18b477299b534b8c0e1b77 100644
--- a/crypto/pcrypt.c
+++ b/crypto/pcrypt.c
@@ -565,3 +565,4 @@ module_exit(pcrypt_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
 MODULE_DESCRIPTION("Parallel crypto wrapper");
+MODULE_ALIAS_CRYPTO("pcrypt");
diff --git a/crypto/rmd128.c b/crypto/rmd128.c
index 8a0f68b7f257fa08cb61e56f5cd778cc5fdafe38..049486ede938faa3ecc5254d91d3da529467d388 100644
--- a/crypto/rmd128.c
+++ b/crypto/rmd128.c
@@ -327,3 +327,4 @@ module_exit(rmd128_mod_fini);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@codelabs.ch>");
 MODULE_DESCRIPTION("RIPEMD-128 Message Digest");
+MODULE_ALIAS_CRYPTO("rmd128");
diff --git a/crypto/rmd160.c b/crypto/rmd160.c
index 525d7bb752cf6a7317d728c57dd3e5528bf7d6bb..de585e51d455f9a5070b62a59540b01cf096b8ec 100644
--- a/crypto/rmd160.c
+++ b/crypto/rmd160.c
@@ -371,3 +371,4 @@ module_exit(rmd160_mod_fini);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@codelabs.ch>");
 MODULE_DESCRIPTION("RIPEMD-160 Message Digest");
+MODULE_ALIAS_CRYPTO("rmd160");
diff --git a/crypto/rmd256.c b/crypto/rmd256.c
index 69293d9b56e0c4dca0ee4e739386ceb81e1a9173..4ec02a754e0992e5700fe683f89dde754fa1d1b8 100644
--- a/crypto/rmd256.c
+++ b/crypto/rmd256.c
@@ -346,3 +346,4 @@ module_exit(rmd256_mod_fini);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@codelabs.ch>");
 MODULE_DESCRIPTION("RIPEMD-256 Message Digest");
+MODULE_ALIAS_CRYPTO("rmd256");
diff --git a/crypto/rmd320.c b/crypto/rmd320.c
index 09f97dfdfbba37b50b4c6a76fde33d844561c21d..770f2cb369f870a74d2c19fdf943be5ecf5ae54a 100644
--- a/crypto/rmd320.c
+++ b/crypto/rmd320.c
@@ -395,3 +395,4 @@ module_exit(rmd320_mod_fini);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@codelabs.ch>");
 MODULE_DESCRIPTION("RIPEMD-320 Message Digest");
+MODULE_ALIAS_CRYPTO("rmd320");
diff --git a/crypto/salsa20_generic.c b/crypto/salsa20_generic.c
index 9a4770c022841542fc8a2a11e7f5c32fa8ae6ac3..3d0f9df30ac9fe368baa63598db9426c2cd8657a 100644
--- a/crypto/salsa20_generic.c
+++ b/crypto/salsa20_generic.c
@@ -248,4 +248,4 @@ module_exit(salsa20_generic_mod_fini);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION ("Salsa20 stream cipher algorithm");
-MODULE_ALIAS("salsa20");
+MODULE_ALIAS_CRYPTO("salsa20");
diff --git a/crypto/seed.c b/crypto/seed.c
index 9c904d6d215140d38039aacd49922f374aac4ad6..c6ba8438be430f59988e52a158d306dcb832c5fe 100644
--- a/crypto/seed.c
+++ b/crypto/seed.c
@@ -476,3 +476,4 @@ module_exit(seed_fini);
 MODULE_DESCRIPTION("SEED Cipher Algorithm");
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Hye-Shik Chang <perky@FreeBSD.org>, Kim Hyun <hkim@kisa.or.kr>");
+MODULE_ALIAS_CRYPTO("seed");
diff --git a/crypto/seqiv.c b/crypto/seqiv.c
index ee190fcedcd2e75ceaae2ededb512bf9987cc283..9daa854cc485b61e97eb7b25ead68da14a47ce88 100644
--- a/crypto/seqiv.c
+++ b/crypto/seqiv.c
@@ -362,3 +362,4 @@ module_exit(seqiv_module_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Sequence Number IV Generator");
+MODULE_ALIAS_CRYPTO("seqiv");
diff --git a/crypto/serpent_generic.c b/crypto/serpent_generic.c
index 7ddbd7e888595dfb96ef80423d01e685278445b7..a53b5e2af335c95d046b85c0162dd0a5bb25e5e4 100644
--- a/crypto/serpent_generic.c
+++ b/crypto/serpent_generic.c
@@ -665,5 +665,5 @@ module_exit(serpent_mod_fini);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Serpent and tnepres (kerneli compatible serpent reversed) Cipher Algorithm");
 MODULE_AUTHOR("Dag Arne Osvik <osvik@ii.uib.no>");
-MODULE_ALIAS("tnepres");
-MODULE_ALIAS("serpent");
+MODULE_ALIAS_CRYPTO("tnepres");
+MODULE_ALIAS_CRYPTO("serpent");
diff --git a/crypto/sha1_generic.c b/crypto/sha1_generic.c
index 7bb04743278216e99a011adafa8ce8d3d97c7b4d..039e58cfa155655f42aec3ddcb8d2761aa22b264 100644
--- a/crypto/sha1_generic.c
+++ b/crypto/sha1_generic.c
@@ -153,4 +153,4 @@ module_exit(sha1_generic_mod_fini);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm");
 
-MODULE_ALIAS("sha1");
+MODULE_ALIAS_CRYPTO("sha1");
diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c
index 65e7b76b057fcddc8f88a04d54b678df245a8c85..5eb21b1200333e95c73f11d3343183c37331544c 100644
--- a/crypto/sha256_generic.c
+++ b/crypto/sha256_generic.c
@@ -384,5 +384,5 @@ module_exit(sha256_generic_mod_fini);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm");
 
-MODULE_ALIAS("sha224");
-MODULE_ALIAS("sha256");
+MODULE_ALIAS_CRYPTO("sha224");
+MODULE_ALIAS_CRYPTO("sha256");
diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c
index 95db67197cd99dd1fd0cd538311f6e3f2202e7f3..8d0b19ed4f4b3fb90df2266132f5877a488b1e1c 100644
--- a/crypto/sha512_generic.c
+++ b/crypto/sha512_generic.c
@@ -288,5 +288,5 @@ module_exit(sha512_generic_mod_fini);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("SHA-512 and SHA-384 Secure Hash Algorithms");
 
-MODULE_ALIAS("sha384");
-MODULE_ALIAS("sha512");
+MODULE_ALIAS_CRYPTO("sha384");
+MODULE_ALIAS_CRYPTO("sha512");
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index 890449e6e7efa85f8b80209eaa002a9babdf6faf..1d864e988ea9f44b76db78c7f2395fecb1623089 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -1225,15 +1225,22 @@ static inline int tcrypt_test(const char *alg)
 	return ret;
 }
 
-static int do_test(int m)
+static int do_test(const char *alg, u32 type, u32 mask, int m)
 {
 	int i;
 	int ret = 0;
 
 	switch (m) {
 	case 0:
+		if (alg) {
+			if (!crypto_has_alg(alg, type,
+					    mask ?: CRYPTO_ALG_TYPE_MASK))
+				ret = -ENOENT;
+			break;
+		}
+
 		for (i = 1; i < 200; i++)
-			ret += do_test(i);
+			ret += do_test(NULL, 0, 0, i);
 		break;
 
 	case 1:
@@ -1752,6 +1759,11 @@ static int do_test(int m)
 		break;
 
 	case 300:
+		if (alg) {
+			test_hash_speed(alg, sec, generic_hash_speed_template);
+			break;
+		}
+
 		/* fall through */
 
 	case 301:
@@ -1838,6 +1850,11 @@ static int do_test(int m)
 		break;
 
 	case 400:
+		if (alg) {
+			test_ahash_speed(alg, sec, generic_hash_speed_template);
+			break;
+		}
+
 		/* fall through */
 
 	case 401:
@@ -2127,12 +2144,6 @@ static int do_test(int m)
 	return ret;
 }
 
-static int do_alg_test(const char *alg, u32 type, u32 mask)
-{
-	return crypto_has_alg(alg, type, mask ?: CRYPTO_ALG_TYPE_MASK) ?
-	       0 : -ENOENT;
-}
-
 static int __init tcrypt_mod_init(void)
 {
 	int err = -ENOMEM;
@@ -2144,10 +2155,7 @@ static int __init tcrypt_mod_init(void)
 			goto err_free_tv;
 	}
 
-	if (alg)
-		err = do_alg_test(alg, type, mask);
-	else
-		err = do_test(mode);
+	err = do_test(alg, type, mask, mode);
 
 	if (err) {
 		printk(KERN_ERR "tcrypt: one or more tests failed!\n");
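As an aside on how the reworked do_test() above is meant to be used (invocation given only as an illustration): the existing alg=, type=, mask= and mode= module parameters now combine, so for example "modprobe tcrypt mode=300 alg=sha256" runs the hash speed test for just the named algorithm, "mode=400 alg=sha256" does the same for the ahash path, and loading tcrypt with alg= set while mode stays at its default of 0 merely checks that the algorithm can be found via crypto_has_alg().
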
diff --git a/crypto/tea.c b/crypto/tea.c
index 0a572323ee4a9e88cdb2d3e4bc92e56235af8a6a..495be2d0077d4a2828323d2d9ec187964cd74948 100644
--- a/crypto/tea.c
+++ b/crypto/tea.c
@@ -270,8 +270,8 @@ static void __exit tea_mod_fini(void)
 	crypto_unregister_algs(tea_algs, ARRAY_SIZE(tea_algs));
 }
 
-MODULE_ALIAS("xtea");
-MODULE_ALIAS("xeta");
+MODULE_ALIAS_CRYPTO("xtea");
+MODULE_ALIAS_CRYPTO("xeta");
 
 module_init(tea_mod_init);
 module_exit(tea_mod_fini);
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 29a0cbdd0d19b1ddca6ee34c402ac7263d1647d4..037368d34586706e69275e1319f4514ad32d05a4 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -3708,8 +3708,7 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
 		panic("%s: %s alg self test failed in fips mode!\n", driver, alg);
 
 	if (fips_enabled && !rc)
-		pr_info(KERN_INFO "alg: self-tests for %s (%s) passed\n",
-			driver, alg);
+		pr_info("alg: self-tests for %s (%s) passed\n", driver, alg);
 
 	return rc;
 
diff --git a/crypto/tgr192.c b/crypto/tgr192.c
index 3c7af0d1ff7a6f396538a41a89e0dd5a9405e99e..6e5651c66cf8a783b235e1f8551154e8e01641de 100644
--- a/crypto/tgr192.c
+++ b/crypto/tgr192.c
@@ -676,8 +676,8 @@ static void __exit tgr192_mod_fini(void)
 	crypto_unregister_shashes(tgr_algs, ARRAY_SIZE(tgr_algs));
 }
 
-MODULE_ALIAS("tgr160");
-MODULE_ALIAS("tgr128");
+MODULE_ALIAS_CRYPTO("tgr160");
+MODULE_ALIAS_CRYPTO("tgr128");
 
 module_init(tgr192_mod_init);
 module_exit(tgr192_mod_fini);
diff --git a/crypto/twofish_generic.c b/crypto/twofish_generic.c
index 2d5000552d0f93b1bacb1e799e50605743c4d969..523ad8c4e35918329cc08ef979d58a678f52dc5d 100644
--- a/crypto/twofish_generic.c
+++ b/crypto/twofish_generic.c
@@ -211,4 +211,4 @@ module_exit(twofish_mod_fini);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION ("Twofish Cipher Algorithm");
-MODULE_ALIAS("twofish");
+MODULE_ALIAS_CRYPTO("twofish");
diff --git a/crypto/vmac.c b/crypto/vmac.c
index d84c24bd7ff7bb3752484a175aee7a7aadd28b21..df76a816cfb22f68ac173d3ef01e2e2f9e166c72 100644
--- a/crypto/vmac.c
+++ b/crypto/vmac.c
@@ -713,3 +713,4 @@ module_exit(vmac_module_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("VMAC hash algorithm");
+MODULE_ALIAS_CRYPTO("vmac");
diff --git a/crypto/wp512.c b/crypto/wp512.c
index ec64e7762fbb55a579fefd781757a9d29182f5da..0de42eb3d0400b895de0cf8e70e1015dd137ff87 100644
--- a/crypto/wp512.c
+++ b/crypto/wp512.c
@@ -1167,8 +1167,8 @@ static void __exit wp512_mod_fini(void)
 	crypto_unregister_shashes(wp_algs, ARRAY_SIZE(wp_algs));
 }
 
-MODULE_ALIAS("wp384");
-MODULE_ALIAS("wp256");
+MODULE_ALIAS_CRYPTO("wp384");
+MODULE_ALIAS_CRYPTO("wp256");
 
 module_init(wp512_mod_init);
 module_exit(wp512_mod_fini);
diff --git a/crypto/xcbc.c b/crypto/xcbc.c
index a5fbdf3738cfd4fa8f0938f53ceabd8fb4d9e2ea..df90b332554cf43fb595623af3e854534a9dc49b 100644
--- a/crypto/xcbc.c
+++ b/crypto/xcbc.c
@@ -286,3 +286,4 @@ module_exit(crypto_xcbc_module_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("XCBC keyed hash algorithm");
+MODULE_ALIAS_CRYPTO("xcbc");
diff --git a/crypto/xts.c b/crypto/xts.c
index ca1608f44cb56617d1b9e1d721d4a6dc12bca041..f6fd43f100c8c68c7150cad5224fafc5dc61db2c 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -362,3 +362,4 @@ module_exit(crypto_module_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("XTS block cipher mode");
+MODULE_ALIAS_CRYPTO("xts");
diff --git a/crypto/zlib.c b/crypto/zlib.c
index c9ee681d57fdf5dd6fb39081d4af3ad02be39d97..0eefa9d237ace7f3b607bbfcc4f657580eaa60a0 100644
--- a/crypto/zlib.c
+++ b/crypto/zlib.c
@@ -378,3 +378,4 @@ module_exit(zlib_mod_fini);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Zlib Compression Algorithm");
 MODULE_AUTHOR("Sony Corporation");
+MODULE_ALIAS_CRYPTO("zlib");
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 91a04ae8003c80b9dcf403f5656ea328e5c0ed9f..de57b38809c79c938bcb4d6aebf2a629e3923aa4 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -64,7 +64,7 @@ config HW_RANDOM_AMD
 
 config HW_RANDOM_ATMEL
 	tristate "Atmel Random Number Generator support"
-	depends on ARCH_AT91 && HAVE_CLK
+	depends on ARCH_AT91 && HAVE_CLK && OF
 	default HW_RANDOM
 	---help---
 	  This driver provides kernel-side support for the Random Number
diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c
index 851bc7e20ad267974d2c5fd1b77a3794fcf7f7e1..0bb0b2120a63bb2208f4af614eeba4672d88ec54 100644
--- a/drivers/char/hw_random/atmel-rng.c
+++ b/drivers/char/hw_random/atmel-rng.c
@@ -67,7 +67,7 @@ static int atmel_trng_probe(struct platform_device *pdev)
 	if (IS_ERR(trng->clk))
 		return PTR_ERR(trng->clk);
 
-	ret = clk_enable(trng->clk);
+	ret = clk_prepare_enable(trng->clk);
 	if (ret)
 		return ret;
 
@@ -95,7 +95,7 @@ static int atmel_trng_remove(struct platform_device *pdev)
 	hwrng_unregister(&trng->rng);
 
 	writel(TRNG_KEY, trng->base + TRNG_CR);
-	clk_disable(trng->clk);
+	clk_disable_unprepare(trng->clk);
 
 	return 0;
 }
@@ -105,7 +105,7 @@ static int atmel_trng_suspend(struct device *dev)
 {
 	struct atmel_trng *trng = dev_get_drvdata(dev);
 
-	clk_disable(trng->clk);
+	clk_disable_unprepare(trng->clk);
 
 	return 0;
 }
@@ -114,7 +114,7 @@ static int atmel_trng_resume(struct device *dev)
 {
 	struct atmel_trng *trng = dev_get_drvdata(dev);
 
-	return clk_enable(trng->clk);
+	return clk_prepare_enable(trng->clk);
 }
 
 static const struct dev_pm_ops atmel_trng_pm_ops = {
@@ -123,6 +123,12 @@ static const struct dev_pm_ops atmel_trng_pm_ops = {
 };
 #endif /* CONFIG_PM */
 
+static const struct of_device_id atmel_trng_dt_ids[] = {
+	{ .compatible = "atmel,at91sam9g45-trng" },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, atmel_trng_dt_ids);
+
 static struct platform_driver atmel_trng_driver = {
 	.probe		= atmel_trng_probe,
 	.remove		= atmel_trng_remove,
@@ -132,6 +138,7 @@ static struct platform_driver atmel_trng_driver = {
 #ifdef CONFIG_PM
 		.pm	= &atmel_trng_pm_ops,
 #endif /* CONFIG_PM */
+		.of_match_table = atmel_trng_dt_ids,
 	},
 };
 
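The clk_enable()/clk_disable() to clk_prepare_enable()/clk_disable_unprepare() switch above is what the common clock framework expects: a clock must be prepared (a step that may sleep) before it can be enabled. For reference only, a stripped-down sketch of that pairing in a generic platform driver; the foo_* names are hypothetical and this is not the atmel-rng code:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* prepare (may sleep) and enable the clock in one call */
	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, clk);
	return 0;
}

static int foo_remove(struct platform_device *pdev)
{
	/* undo both steps; the same call is appropriate on suspend */
	clk_disable_unprepare(platform_get_drvdata(pdev));
	return 0;
}
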
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index aa30a25c8d49bcc2298a64d86d680dab2bdc370c..1500cfd799a79ee4f7d221ca8768469a35f00035 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -281,7 +281,6 @@ static ssize_t hwrng_attr_available_show(struct device *dev,
 					 char *buf)
 {
 	int err;
-	ssize_t ret = 0;
 	struct hwrng *rng;
 
 	err = mutex_lock_interruptible(&rng_mutex);
@@ -289,16 +288,13 @@ static ssize_t hwrng_attr_available_show(struct device *dev,
 		return -ERESTARTSYS;
 	buf[0] = '\0';
 	list_for_each_entry(rng, &rng_list, list) {
-		strncat(buf, rng->name, PAGE_SIZE - ret - 1);
-		ret += strlen(rng->name);
-		strncat(buf, " ", PAGE_SIZE - ret - 1);
-		ret++;
+		strlcat(buf, rng->name, PAGE_SIZE);
+		strlcat(buf, " ", PAGE_SIZE);
 	}
-	strncat(buf, "\n", PAGE_SIZE - ret - 1);
-	ret++;
+	strlcat(buf, "\n", PAGE_SIZE);
 	mutex_unlock(&rng_mutex);
 
-	return ret;
+	return strlen(buf);
 }
 
 static DEVICE_ATTR(rng_current, S_IRUGO | S_IWUSR,
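The hwrng_attr_available_show() cleanup above drops the manual length bookkeeping in favour of strlcat(), whose size argument is always the total size of the destination buffer and whose result is always NUL-terminated within it. A small user-space stand-in with the usual strlcat() semantics (the kernel's own implementation lives in lib/string.c), just to illustrate why strlcat(buf, name, PAGE_SIZE) is the correct bound:

#include <stdio.h>
#include <string.h>

/* User-space stand-in for strlcat(): 'size' is the total destination
 * buffer size and the result is always NUL-terminated (illustration only). */
static size_t my_strlcat(char *dst, const char *src, size_t size)
{
	size_t dlen = strlen(dst);
	size_t slen = strlen(src);
	size_t copy;

	if (dlen >= size)
		return size + slen;	/* nothing fits; report desired length */
	copy = size - dlen - 1;
	if (copy > slen)
		copy = slen;
	memcpy(dst + dlen, src, copy);
	dst[dlen + copy] = '\0';
	return dlen + slen;
}

int main(void)
{
	char buf[16] = "";

	my_strlcat(buf, "rng-a", sizeof(buf));
	my_strlcat(buf, " ", sizeof(buf));
	my_strlcat(buf, "rng-with-a-long-name", sizeof(buf));	/* truncated safely */
	printf("%s\n", buf);
	return 0;
}

Since nothing is ever written past the given size, strlen(buf) is then a safe value for the sysfs show routine to return.
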
diff --git a/drivers/crypto/bfin_crc.c b/drivers/crypto/bfin_crc.c
index b099e33cb07303dff8e32da36fdfc589426d0bc0..e96eddc0e0b33784881c68290758f3c5d4d6bbe0 100644
--- a/drivers/crypto/bfin_crc.c
+++ b/drivers/crypto/bfin_crc.c
@@ -21,13 +21,13 @@
 #include <linux/scatterlist.h>
 #include <linux/dma-mapping.h>
 #include <linux/delay.h>
-#include <linux/unaligned/access_ok.h>
 #include <linux/crypto.h>
 #include <linux/cryptohash.h>
 #include <crypto/scatterwalk.h>
 #include <crypto/algapi.h>
 #include <crypto/hash.h>
 #include <crypto/internal/hash.h>
+#include <asm/unaligned.h>
 
 #include <asm/dma.h>
 #include <asm/portmux.h>
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index a80ea853701db2f430dee148da1a9a39734ef34c..3187400daf319743fa8592b643014b7f5f8d90fd 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -60,6 +60,7 @@
 #define CAAM_CRA_PRIORITY		3000
 /* max key is sum of AES_MAX_KEY_SIZE, max split key size */
 #define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
+					 CTR_RFC3686_NONCE_SIZE + \
 					 SHA512_DIGEST_SIZE * 2)
 /* max IV is max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
 #define CAAM_MAX_IV_LENGTH		16
@@ -70,17 +71,34 @@
 #define DESC_AEAD_DEC_LEN		(DESC_AEAD_BASE + 18 * CAAM_CMD_SZ)
 #define DESC_AEAD_GIVENC_LEN		(DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
 
+/* Note: Nonce is counted in enckeylen */
+#define DESC_AEAD_CTR_RFC3686_LEN	(6 * CAAM_CMD_SZ)
+
 #define DESC_AEAD_NULL_BASE		(3 * CAAM_CMD_SZ)
 #define DESC_AEAD_NULL_ENC_LEN		(DESC_AEAD_NULL_BASE + 14 * CAAM_CMD_SZ)
 #define DESC_AEAD_NULL_DEC_LEN		(DESC_AEAD_NULL_BASE + 17 * CAAM_CMD_SZ)
 
+#define DESC_GCM_BASE			(3 * CAAM_CMD_SZ)
+#define DESC_GCM_ENC_LEN		(DESC_GCM_BASE + 23 * CAAM_CMD_SZ)
+#define DESC_GCM_DEC_LEN		(DESC_GCM_BASE + 19 * CAAM_CMD_SZ)
+
+#define DESC_RFC4106_BASE		(3 * CAAM_CMD_SZ)
+#define DESC_RFC4106_ENC_LEN		(DESC_RFC4106_BASE + 15 * CAAM_CMD_SZ)
+#define DESC_RFC4106_DEC_LEN		(DESC_RFC4106_BASE + 14 * CAAM_CMD_SZ)
+#define DESC_RFC4106_GIVENC_LEN		(DESC_RFC4106_BASE + 21 * CAAM_CMD_SZ)
+
+#define DESC_RFC4543_BASE		(3 * CAAM_CMD_SZ)
+#define DESC_RFC4543_ENC_LEN		(DESC_RFC4543_BASE + 25 * CAAM_CMD_SZ)
+#define DESC_RFC4543_DEC_LEN		(DESC_RFC4543_BASE + 27 * CAAM_CMD_SZ)
+#define DESC_RFC4543_GIVENC_LEN		(DESC_RFC4543_BASE + 30 * CAAM_CMD_SZ)
+
 #define DESC_ABLKCIPHER_BASE		(3 * CAAM_CMD_SZ)
 #define DESC_ABLKCIPHER_ENC_LEN		(DESC_ABLKCIPHER_BASE + \
 					 20 * CAAM_CMD_SZ)
 #define DESC_ABLKCIPHER_DEC_LEN		(DESC_ABLKCIPHER_BASE + \
 					 15 * CAAM_CMD_SZ)
 
-#define DESC_MAX_USED_BYTES		(DESC_AEAD_GIVENC_LEN + \
+#define DESC_MAX_USED_BYTES		(DESC_RFC4543_GIVENC_LEN + \
 					 CAAM_MAX_KEY_SIZE)
 #define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
 
@@ -128,11 +146,13 @@ static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
 /*
  * For aead encrypt and decrypt, read iv for both classes
  */
-static inline void aead_append_ld_iv(u32 *desc, int ivsize)
+static inline void aead_append_ld_iv(u32 *desc, int ivsize, int ivoffset)
 {
-	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
-		   LDST_CLASS_1_CCB | ivsize);
-	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | ivsize);
+	append_seq_load(desc, ivsize, LDST_CLASS_1_CCB |
+			LDST_SRCDST_BYTE_CONTEXT |
+			(ivoffset << LDST_OFFSET_SHIFT));
+	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO |
+		    (ivoffset << MOVE_OFFSET_SHIFT) | ivsize);
 }
 
 /*
@@ -178,35 +198,60 @@ struct caam_ctx {
 };
 
 static void append_key_aead(u32 *desc, struct caam_ctx *ctx,
-			    int keys_fit_inline)
+			    int keys_fit_inline, bool is_rfc3686)
 {
+	u32 *nonce;
+	unsigned int enckeylen = ctx->enckeylen;
+
+	/*
+	 * RFC3686 specific:
+	 *	| ctx->key = {AUTH_KEY, ENC_KEY, NONCE}
+	 *	| enckeylen = encryption key size + nonce size
+	 */
+	if (is_rfc3686)
+		enckeylen -= CTR_RFC3686_NONCE_SIZE;
+
 	if (keys_fit_inline) {
 		append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
 				  ctx->split_key_len, CLASS_2 |
 				  KEY_DEST_MDHA_SPLIT | KEY_ENC);
 		append_key_as_imm(desc, (void *)ctx->key +
-				  ctx->split_key_pad_len, ctx->enckeylen,
-				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+				  ctx->split_key_pad_len, enckeylen,
+				  enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
 	} else {
 		append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
 			   KEY_DEST_MDHA_SPLIT | KEY_ENC);
 		append_key(desc, ctx->key_dma + ctx->split_key_pad_len,
-			   ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+			   enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+	}
+
+	/* Load Counter into CONTEXT1 reg */
+	if (is_rfc3686) {
+		nonce = (u32 *)((void *)ctx->key + ctx->split_key_pad_len +
+			       enckeylen);
+		append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
+				    LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+		append_move(desc,
+			    MOVE_SRC_OUTFIFO |
+			    MOVE_DEST_CLASS1CTX |
+			    (16 << MOVE_OFFSET_SHIFT) |
+			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
 	}
 }
 
 static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
-				  int keys_fit_inline)
+				  int keys_fit_inline, bool is_rfc3686)
 {
 	u32 *key_jump_cmd;
 
-	init_sh_desc(desc, HDR_SHARE_SERIAL);
+	/* Note: Context registers are saved. */
+	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
 
 	/* Skip if already shared */
 	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
 				   JUMP_COND_SHRD);
 
-	append_key_aead(desc, ctx, keys_fit_inline);
+	append_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
 
 	set_jump_tgt_here(desc, key_jump_cmd);
 }
@@ -406,10 +451,17 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
 {
 	struct aead_tfm *tfm = &aead->base.crt_aead;
 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct crypto_tfm *ctfm = crypto_aead_tfm(aead);
+	const char *alg_name = crypto_tfm_alg_name(ctfm);
 	struct device *jrdev = ctx->jrdev;
-	bool keys_fit_inline = false;
+	bool keys_fit_inline;
 	u32 geniv, moveiv;
+	u32 ctx1_iv_off = 0;
 	u32 *desc;
+	const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
+			       OP_ALG_AAI_CTR_MOD128);
+	const bool is_rfc3686 = (ctr_mode &&
+				 (strstr(alg_name, "rfc3686") != NULL));
 
 	if (!ctx->authsize)
 		return 0;
@@ -418,19 +470,37 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
 	if (!ctx->enckeylen)
 		return aead_null_set_sh_desc(aead);
 
+	/*
+	 * AES-CTR needs to load IV in CONTEXT1 reg
+	 * at an offset of 128bits (16bytes)
+	 * CONTEXT1[255:128] = IV
+	 */
+	if (ctr_mode)
+		ctx1_iv_off = 16;
+
+	/*
+	 * RFC3686 specific:
+	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
+	 */
+	if (is_rfc3686)
+		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
+
 	/*
 	 * Job Descriptor and Shared Descriptors
 	 * must all fit into the 64-word Descriptor h/w Buffer
 	 */
+	keys_fit_inline = false;
 	if (DESC_AEAD_ENC_LEN + DESC_JOB_IO_LEN +
-	    ctx->split_key_pad_len + ctx->enckeylen <=
+	    ctx->split_key_pad_len + ctx->enckeylen +
+	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
 	    CAAM_DESC_BYTES_MAX)
 		keys_fit_inline = true;
 
 	/* aead_encrypt shared descriptor */
 	desc = ctx->sh_desc_enc;
 
-	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);
+	/* Note: Context registers are saved. */
+	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
 
 	/* Class 2 operation */
 	append_operation(desc, ctx->class2_alg_type |
@@ -448,7 +518,15 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
 	/* read assoc before reading payload */
 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
 			     KEY_VLF);
-	aead_append_ld_iv(desc, tfm->ivsize);
+	aead_append_ld_iv(desc, tfm->ivsize, ctx1_iv_off);
+
+	/* Load Counter into CONTEXT1 reg */
+	if (is_rfc3686)
+		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
+				    LDST_CLASS_1_CCB |
+				    LDST_SRCDST_BYTE_CONTEXT |
+				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+				     LDST_OFFSET_SHIFT));
 
 	/* Class 1 operation */
 	append_operation(desc, ctx->class1_alg_type |
@@ -482,14 +560,16 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
 	 */
 	keys_fit_inline = false;
 	if (DESC_AEAD_DEC_LEN + DESC_JOB_IO_LEN +
-	    ctx->split_key_pad_len + ctx->enckeylen <=
+	    ctx->split_key_pad_len + ctx->enckeylen +
+	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
 	    CAAM_DESC_BYTES_MAX)
 		keys_fit_inline = true;
 
 	/* aead_decrypt shared descriptor */
 	desc = ctx->sh_desc_dec;
 
-	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);
+	/* Note: Context registers are saved. */
+	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
 
 	/* Class 2 operation */
 	append_operation(desc, ctx->class2_alg_type |
@@ -506,9 +586,22 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
 			     KEY_VLF);
 
-	aead_append_ld_iv(desc, tfm->ivsize);
+	aead_append_ld_iv(desc, tfm->ivsize, ctx1_iv_off);
+
+	/* Load Counter into CONTEXT1 reg */
+	if (is_rfc3686)
+		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
+				    LDST_CLASS_1_CCB |
+				    LDST_SRCDST_BYTE_CONTEXT |
+				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+				     LDST_OFFSET_SHIFT));
 
-	append_dec_op1(desc, ctx->class1_alg_type);
+	/* Choose operation */
+	if (ctr_mode)
+		append_operation(desc, ctx->class1_alg_type |
+				 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
+	else
+		append_dec_op1(desc, ctx->class1_alg_type);
 
 	/* Read and write cryptlen bytes */
 	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
@@ -538,14 +631,16 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
 	 */
 	keys_fit_inline = false;
 	if (DESC_AEAD_GIVENC_LEN + DESC_JOB_IO_LEN +
-	    ctx->split_key_pad_len + ctx->enckeylen <=
+	    ctx->split_key_pad_len + ctx->enckeylen +
+	    (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0) <=
 	    CAAM_DESC_BYTES_MAX)
 		keys_fit_inline = true;
 
 	/* aead_givencrypt shared descriptor */
 	desc = ctx->sh_desc_givenc;
 
-	init_sh_desc_key_aead(desc, ctx, keys_fit_inline);
+	/* Note: Context registers are saved. */
+	init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
 
 	/* Generate IV */
 	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
@@ -554,13 +649,16 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
 	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
 			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
 	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
-	append_move(desc, MOVE_SRC_INFIFO |
-		    MOVE_DEST_CLASS1CTX | (tfm->ivsize << MOVE_LEN_SHIFT));
+	append_move(desc, MOVE_WAITCOMP |
+		    MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX |
+		    (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
+		    (tfm->ivsize << MOVE_LEN_SHIFT));
 	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
 
 	/* Copy IV to class 1 context */
-	append_move(desc, MOVE_SRC_CLASS1CTX |
-		    MOVE_DEST_OUTFIFO | (tfm->ivsize << MOVE_LEN_SHIFT));
+	append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO |
+		    (ctx1_iv_off << MOVE_OFFSET_SHIFT) |
+		    (tfm->ivsize << MOVE_LEN_SHIFT));
 
 	/* Return to encryption */
 	append_operation(desc, ctx->class2_alg_type |
@@ -576,7 +674,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
 	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
 			     KEY_VLF);
 
-	/* Copy iv from class 1 ctx to class 2 fifo*/
+	/* Copy iv from outfifo to class 2 fifo */
 	moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 |
 		 NFIFOENTRY_DTYPE_MSG | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
 	append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB |
@@ -584,6 +682,14 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
 	append_load_imm_u32(desc, tfm->ivsize, LDST_CLASS_2_CCB |
 			    LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM);
 
+	/* Load Counter into CONTEXT1 reg */
+	if (is_rfc3686)
+		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
+				    LDST_CLASS_1_CCB |
+				    LDST_SRCDST_BYTE_CONTEXT |
+				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+				     LDST_OFFSET_SHIFT));
+
 	/* Class 1 operation */
 	append_operation(desc, ctx->class1_alg_type |
 			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
@@ -630,127 +736,113 @@ static int aead_setauthsize(struct crypto_aead *authenc,
 	return 0;
 }
 
-static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
-			      u32 authkeylen)
-{
-	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
-			       ctx->split_key_pad_len, key_in, authkeylen,
-			       ctx->alg_op);
-}
-
-static int aead_setkey(struct crypto_aead *aead,
-			       const u8 *key, unsigned int keylen)
+static int gcm_set_sh_desc(struct crypto_aead *aead)
 {
-	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
-	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
+	struct aead_tfm *tfm = &aead->base.crt_aead;
 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
 	struct device *jrdev = ctx->jrdev;
-	struct crypto_authenc_keys keys;
-	int ret = 0;
+	bool keys_fit_inline = false;
+	u32 *key_jump_cmd, *zero_payload_jump_cmd,
+	    *zero_assoc_jump_cmd1, *zero_assoc_jump_cmd2;
+	u32 *desc;
 
-	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
-		goto badkey;
+	if (!ctx->enckeylen || !ctx->authsize)
+		return 0;
 
-	/* Pick class 2 key length from algorithm submask */
-	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
-				      OP_ALG_ALGSEL_SHIFT] * 2;
-	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
+	/*
+	 * AES GCM encrypt shared descriptor
+	 * Job Descriptor and Shared Descriptor
+	 * must fit into the 64-word Descriptor h/w Buffer
+	 */
+	if (DESC_GCM_ENC_LEN + DESC_JOB_IO_LEN +
+	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
+		keys_fit_inline = true;
 
-	if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
-		goto badkey;
+	desc = ctx->sh_desc_enc;
 
-#ifdef DEBUG
-	printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
-	       keys.authkeylen + keys.enckeylen, keys.enckeylen,
-	       keys.authkeylen);
-	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
-	       ctx->split_key_len, ctx->split_key_pad_len);
-	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-#endif
+	init_sh_desc(desc, HDR_SHARE_SERIAL);
 
-	ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
-	if (ret) {
-		goto badkey;
-	}
+	/* skip key loading if they are loaded due to sharing */
+	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+				   JUMP_COND_SHRD | JUMP_COND_SELF);
+	if (keys_fit_inline)
+		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+	else
+		append_key(desc, ctx->key_dma, ctx->enckeylen,
+			   CLASS_1 | KEY_DEST_CLASS_REG);
+	set_jump_tgt_here(desc, key_jump_cmd);
 
-	/* postpend encryption key to auth split key */
-	memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);
+	/* class 1 operation */
+	append_operation(desc, ctx->class1_alg_type |
+			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
 
-	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
-				      keys.enckeylen, DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, ctx->key_dma)) {
-		dev_err(jrdev, "unable to map key i/o memory\n");
-		return -ENOMEM;
-	}
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
-		       ctx->split_key_pad_len + keys.enckeylen, 1);
-#endif
+	/* cryptlen = seqoutlen - authsize */
+	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
 
-	ctx->enckeylen = keys.enckeylen;
+	/* assoclen + cryptlen = seqinlen - ivsize */
+	append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);
 
-	ret = aead_set_sh_desc(aead);
-	if (ret) {
-		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
-				 keys.enckeylen, DMA_TO_DEVICE);
-	}
+	/* assoclen = (assoclen + cryptlen) - cryptlen */
+	append_math_sub(desc, REG1, REG2, REG3, CAAM_CMD_SZ);
 
-	return ret;
-badkey:
-	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
-	return -EINVAL;
-}
+	/* if cryptlen is ZERO jump to zero-payload commands */
+	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+	zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
+					    JUMP_COND_MATH_Z);
+	/* read IV */
+	append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
+			     FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
+
+	/* if assoclen is ZERO, skip reading the assoc data */
+	append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ);
+	zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
+					   JUMP_COND_MATH_Z);
+
+	/* read assoc data */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+	set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
 
-static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
-			     const u8 *key, unsigned int keylen)
-{
-	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
-	struct ablkcipher_tfm *tfm = &ablkcipher->base.crt_ablkcipher;
-	struct device *jrdev = ctx->jrdev;
-	int ret = 0;
-	u32 *key_jump_cmd;
-	u32 *desc;
+	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
 
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
-#endif
+	/* write encrypted data */
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
 
-	memcpy(ctx->key, key, keylen);
-	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
-				      DMA_TO_DEVICE);
-	if (dma_mapping_error(jrdev, ctx->key_dma)) {
-		dev_err(jrdev, "unable to map key i/o memory\n");
-		return -ENOMEM;
-	}
-	ctx->enckeylen = keylen;
+	/* read payload data */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
 
-	/* ablkcipher_encrypt shared descriptor */
-	desc = ctx->sh_desc_enc;
-	init_sh_desc(desc, HDR_SHARE_SERIAL);
-	/* Skip if already shared */
-	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
-				   JUMP_COND_SHRD);
+	/* jump the zero-payload commands */
+	append_jump(desc, JUMP_TEST_ALL | 7);
 
-	/* Load class1 key only */
-	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
-			  ctx->enckeylen, CLASS_1 |
-			  KEY_DEST_CLASS_REG);
+	/* zero-payload commands */
+	set_jump_tgt_here(desc, zero_payload_jump_cmd);
 
-	set_jump_tgt_here(desc, key_jump_cmd);
+	/* if assoclen is ZERO, jump to IV reading - IV is the only input data */
+	append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ);
+	zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
+					   JUMP_COND_MATH_Z);
+	/* read IV */
+	append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
+			     FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
 
-	/* Load iv */
-	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
-		   LDST_CLASS_1_CCB | tfm->ivsize);
+	/* read assoc data */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1);
 
-	/* Load operation */
-	append_operation(desc, ctx->class1_alg_type |
-			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+	/* jump to ICV writing */
+	append_jump(desc, JUMP_TEST_ALL | 2);
 
-	/* Perform operation */
-	ablkcipher_append_src_dst(desc);
+	/* read IV - it is the only input data */
+	set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
+	append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
+			     FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 |
+			     FIFOLD_TYPE_LAST1);
+
+	/* write ICV */
+	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
+			 LDST_SRCDST_BYTE_CONTEXT);
 
 	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
 					      desc_bytes(desc),
@@ -760,35 +852,93 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
 		return -ENOMEM;
 	}
 #ifdef DEBUG
-	print_hex_dump(KERN_ERR,
-		       "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
+	print_hex_dump(KERN_ERR, "gcm enc shdesc@"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
 		       desc_bytes(desc), 1);
 #endif
-	/* ablkcipher_decrypt shared descriptor */
+
+	/*
+	 * Job Descriptor and Shared Descriptors
+	 * must all fit into the 64-word Descriptor h/w Buffer
+	 */
+	keys_fit_inline = false;
+	if (DESC_GCM_DEC_LEN + DESC_JOB_IO_LEN +
+	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
+		keys_fit_inline = true;
+
 	desc = ctx->sh_desc_dec;
 
 	init_sh_desc(desc, HDR_SHARE_SERIAL);
-	/* Skip if already shared */
-	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
-				   JUMP_COND_SHRD);
-
-	/* Load class1 key only */
-	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
-			  ctx->enckeylen, CLASS_1 |
-			  KEY_DEST_CLASS_REG);
 
+	/* skip key loading if they are loaded due to sharing */
+	key_jump_cmd = append_jump(desc, JUMP_JSL |
+				   JUMP_TEST_ALL | JUMP_COND_SHRD |
+				   JUMP_COND_SELF);
+	if (keys_fit_inline)
+		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+	else
+		append_key(desc, ctx->key_dma, ctx->enckeylen,
+			   CLASS_1 | KEY_DEST_CLASS_REG);
 	set_jump_tgt_here(desc, key_jump_cmd);
 
-	/* load IV */
-	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
-		   LDST_CLASS_1_CCB | tfm->ivsize);
+	/* class 1 operation */
+	append_operation(desc, ctx->class1_alg_type |
+			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
 
-	/* Choose operation */
-	append_dec_op1(desc, ctx->class1_alg_type);
+	/* assoclen + cryptlen = seqinlen - ivsize - icvsize */
+	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
+				ctx->authsize + tfm->ivsize);
 
-	/* Perform operation */
-	ablkcipher_append_src_dst(desc);
+	/* assoclen = (assoclen + cryptlen) - cryptlen */
+	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+	append_math_sub(desc, REG1, REG3, REG2, CAAM_CMD_SZ);
+
+	/* read IV */
+	append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
+			     FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
+
+	/* jump to zero-payload command if cryptlen is zero */
+	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
+	zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL |
+					    JUMP_COND_MATH_Z);
+
+	append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ);
+	/* if assoclen is ZERO, skip reading assoc data */
+	zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL |
+					   JUMP_COND_MATH_Z);
+	/* read assoc data */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+	set_jump_tgt_here(desc, zero_assoc_jump_cmd1);
+
+	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
+
+	/* store encrypted data */
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+	/* read payload data */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
+
+	/* jump the zero-payload commands */
+	append_jump(desc, JUMP_TEST_ALL | 4);
+
+	/* zero-payload command */
+	set_jump_tgt_here(desc, zero_payload_jump_cmd);
+
+	/* if assoclen is ZERO, jump to ICV reading */
+	append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ);
+	zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL |
+					   JUMP_COND_MATH_Z);
+	/* read assoc data */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+	set_jump_tgt_here(desc, zero_assoc_jump_cmd2);
+
+	/* read ICV */
+	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
+			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
 
 	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
 					      desc_bytes(desc),
@@ -797,75 +947,1192 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
 		dev_err(jrdev, "unable to map shared descriptor\n");
 		return -ENOMEM;
 	}
-
 #ifdef DEBUG
-	print_hex_dump(KERN_ERR,
-		       "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
+	print_hex_dump(KERN_ERR, "gcm dec shdesc@"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
 		       desc_bytes(desc), 1);
 #endif
 
-	return ret;
+	return 0;
 }
 
-/*
- * aead_edesc - s/w-extended aead descriptor
- * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
- * @assoc_chained: if source is chained
- * @src_nents: number of segments in input scatterlist
- * @src_chained: if source is chained
- * @dst_nents: number of segments in output scatterlist
- * @dst_chained: if destination is chained
- * @iv_dma: dma address of iv for checking continuity and link table
- * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
- * @sec4_sg_bytes: length of dma mapped sec4_sg space
- * @sec4_sg_dma: bus physical mapped address of h/w link table
- * @hw_desc: the h/w job descriptor followed by any referenced link tables
- */
-struct aead_edesc {
-	int assoc_nents;
-	bool assoc_chained;
-	int src_nents;
-	bool src_chained;
-	int dst_nents;
-	bool dst_chained;
-	dma_addr_t iv_dma;
-	int sec4_sg_bytes;
-	dma_addr_t sec4_sg_dma;
-	struct sec4_sg_entry *sec4_sg;
-	u32 hw_desc[0];
-};
+static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
 
-/*
- * ablkcipher_edesc - s/w-extended ablkcipher descriptor
- * @src_nents: number of segments in input scatterlist
- * @src_chained: if source is chained
- * @dst_nents: number of segments in output scatterlist
- * @dst_chained: if destination is chained
- * @iv_dma: dma address of iv for checking continuity and link table
- * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
- * @sec4_sg_bytes: length of dma mapped sec4_sg space
- * @sec4_sg_dma: bus physical mapped address of h/w link table
- * @hw_desc: the h/w job descriptor followed by any referenced link tables
- */
-struct ablkcipher_edesc {
-	int src_nents;
-	bool src_chained;
-	int dst_nents;
-	bool dst_chained;
-	dma_addr_t iv_dma;
-	int sec4_sg_bytes;
-	dma_addr_t sec4_sg_dma;
-	struct sec4_sg_entry *sec4_sg;
-	u32 hw_desc[0];
-};
+	ctx->authsize = authsize;
+	gcm_set_sh_desc(authenc);
 
-static void caam_unmap(struct device *dev, struct scatterlist *src,
-		       struct scatterlist *dst, int src_nents,
-		       bool src_chained, int dst_nents, bool dst_chained,
-		       dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
-		       int sec4_sg_bytes)
-{
+	return 0;
+}
+
+static int rfc4106_set_sh_desc(struct crypto_aead *aead)
+{
+	struct aead_tfm *tfm = &aead->base.crt_aead;
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct device *jrdev = ctx->jrdev;
+	bool keys_fit_inline = false;
+	u32 *key_jump_cmd, *move_cmd, *write_iv_cmd;
+	u32 *desc;
+	u32 geniv;
+
+	if (!ctx->enckeylen || !ctx->authsize)
+		return 0;
+
+	/*
+	 * RFC4106 encrypt shared descriptor
+	 * Job Descriptor and Shared Descriptor
+	 * must fit into the 64-word Descriptor h/w Buffer
+	 */
+	if (DESC_RFC4106_ENC_LEN + DESC_JOB_IO_LEN +
+	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
+		keys_fit_inline = true;
+
+	desc = ctx->sh_desc_enc;
+
+	init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+	/* Skip key loading if it is loaded due to sharing */
+	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+				   JUMP_COND_SHRD);
+	if (keys_fit_inline)
+		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+	else
+		append_key(desc, ctx->key_dma, ctx->enckeylen,
+			   CLASS_1 | KEY_DEST_CLASS_REG);
+	set_jump_tgt_here(desc, key_jump_cmd);
+
+	/* Class 1 operation */
+	append_operation(desc, ctx->class1_alg_type |
+			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+	/* cryptlen = seqoutlen - authsize */
+	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
+	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+	/* assoclen + cryptlen = seqinlen - ivsize */
+	append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);
+
+	/* assoclen = (assoclen + cryptlen) - cryptlen */
+	append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);
+
+	/* Read Salt */
+	append_fifo_load_as_imm(desc, (void *)(ctx->key + ctx->enckeylen),
+				4, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV);
+	/* Read AES-GCM-ESP IV */
+	append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
+			     FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
+
+	/* Read assoc data */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+
+	/* Will read cryptlen bytes */
+	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+	/* Write encrypted data */
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+	/* Read payload data */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
+
+	/* Write ICV */
+	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
+			 LDST_SRCDST_BYTE_CONTEXT);
+
+	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
+					      desc_bytes(desc),
+					      DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
+		dev_err(jrdev, "unable to map shared descriptor\n");
+		return -ENOMEM;
+	}
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "rfc4106 enc shdesc@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
+		       desc_bytes(desc), 1);
+#endif
+
+	/*
+	 * Job Descriptor and Shared Descriptors
+	 * must all fit into the 64-word Descriptor h/w Buffer
+	 */
+	keys_fit_inline = false;
+	if (DESC_RFC4106_DEC_LEN + DESC_JOB_IO_LEN +
+	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
+		keys_fit_inline = true;
+
+	desc = ctx->sh_desc_dec;
+
+	init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+	/* Skip key loading if it is loaded due to sharing */
+	key_jump_cmd = append_jump(desc, JUMP_JSL |
+				   JUMP_TEST_ALL | JUMP_COND_SHRD);
+	if (keys_fit_inline)
+		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+	else
+		append_key(desc, ctx->key_dma, ctx->enckeylen,
+			   CLASS_1 | KEY_DEST_CLASS_REG);
+	set_jump_tgt_here(desc, key_jump_cmd);
+
+	/* Class 1 operation */
+	append_operation(desc, ctx->class1_alg_type |
+			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+	/* assoclen + cryptlen = seqinlen - ivsize - icvsize */
+	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
+				ctx->authsize + tfm->ivsize);
+
+	/* assoclen = (assoclen + cryptlen) - cryptlen */
+	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+	append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
+
+	/* Will write cryptlen bytes */
+	append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+
+	/* Read Salt */
+	append_fifo_load_as_imm(desc, (void *)(ctx->key + ctx->enckeylen),
+				4, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV);
+	/* Read AES-GCM-ESP IV */
+	append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 |
+			     FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1);
+
+	/* Read assoc data */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+
+	/* Will read cryptlen bytes */
+	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
+
+	/* Store payload data */
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+	/* Read encrypted data */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
+
+	/* Read ICV */
+	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
+			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
+
+	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
+					      desc_bytes(desc),
+					      DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
+		dev_err(jrdev, "unable to map shared descriptor\n");
+		return -ENOMEM;
+	}
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "rfc4106 dec shdesc@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
+		       desc_bytes(desc), 1);
+#endif
+
+	/*
+	 * Job Descriptor and Shared Descriptors
+	 * must all fit into the 64-word Descriptor h/w Buffer
+	 */
+	keys_fit_inline = false;
+	if (DESC_RFC4106_GIVENC_LEN + DESC_JOB_IO_LEN +
+	    ctx->split_key_pad_len + ctx->enckeylen <=
+	    CAAM_DESC_BYTES_MAX)
+		keys_fit_inline = true;
+
+	/* rfc4106_givencrypt shared descriptor */
+	desc = ctx->sh_desc_givenc;
+
+	init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+	/* Skip key loading if it is loaded due to sharing */
+	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+				   JUMP_COND_SHRD);
+	if (keys_fit_inline)
+		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+	else
+		append_key(desc, ctx->key_dma, ctx->enckeylen,
+			   CLASS_1 | KEY_DEST_CLASS_REG);
+	set_jump_tgt_here(desc, key_jump_cmd);
+
+	/* Generate IV */
+	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
+		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
+		NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
+	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
+			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
+	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+	move_cmd = append_move(desc, MOVE_SRC_INFIFO | MOVE_DEST_DESCBUF |
+			       (tfm->ivsize << MOVE_LEN_SHIFT));
+	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+	/* Copy generated IV to OFIFO */
+	write_iv_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_OUTFIFO |
+				   (tfm->ivsize << MOVE_LEN_SHIFT));
+
+	/* Class 1 operation */
+	append_operation(desc, ctx->class1_alg_type |
+			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+	/* ivsize + cryptlen = seqoutlen - authsize */
+	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
+
+	/* assoclen = seqinlen - (ivsize + cryptlen) */
+	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
+
+	/* Will write ivsize + cryptlen */
+	append_math_add(desc, VARSEQOUTLEN, REG3, REG0, CAAM_CMD_SZ);
+
+	/* Read Salt and generated IV */
+	append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV |
+		   FIFOLD_TYPE_FLUSH1 | IMMEDIATE | 12);
+	/* Append Salt */
+	append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4);
+	set_move_tgt_here(desc, move_cmd);
+	set_move_tgt_here(desc, write_iv_cmd);
+	/* Blank commands. Will be overwritten by generated IV. */
+	append_cmd(desc, 0x00000000);
+	append_cmd(desc, 0x00000000);
+	/* End of blank commands */
+
+	/* No need to reload iv */
+	append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_SKIP);
+
+	/* Read assoc data */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1);
+
+	/* Will read cryptlen */
+	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+	/* Store generated IV and encrypted data */
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+	/* Read payload data */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+			     FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1);
+
+	/* Write ICV */
+	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
+			 LDST_SRCDST_BYTE_CONTEXT);
+
+	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
+						 desc_bytes(desc),
+						 DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
+		dev_err(jrdev, "unable to map shared descriptor\n");
+		return -ENOMEM;
+	}
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR,
+		       "rfc4106 givenc shdesc@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
+		       desc_bytes(desc), 1);
+#endif
+
+	return 0;
+}
+
+static int rfc4106_setauthsize(struct crypto_aead *authenc,
+			       unsigned int authsize)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+
+	ctx->authsize = authsize;
+	rfc4106_set_sh_desc(authenc);
+
+	return 0;
+}
+
+static int rfc4543_set_sh_desc(struct crypto_aead *aead)
+{
+	struct aead_tfm *tfm = &aead->base.crt_aead;
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct device *jrdev = ctx->jrdev;
+	bool keys_fit_inline = false;
+	u32 *key_jump_cmd, *write_iv_cmd, *write_aad_cmd;
+	u32 *read_move_cmd, *write_move_cmd;
+	u32 *desc;
+	u32 geniv;
+
+	if (!ctx->enckeylen || !ctx->authsize)
+		return 0;
+
+	/*
+	 * RFC4543 encrypt shared descriptor
+	 * Job Descriptor and Shared Descriptor
+	 * must fit into the 64-word Descriptor h/w Buffer
+	 */
+	if (DESC_RFC4543_ENC_LEN + DESC_JOB_IO_LEN +
+	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
+		keys_fit_inline = true;
+
+	desc = ctx->sh_desc_enc;
+
+	init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+	/* Skip key loading if it is loaded due to sharing */
+	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+				   JUMP_COND_SHRD);
+	if (keys_fit_inline)
+		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+	else
+		append_key(desc, ctx->key_dma, ctx->enckeylen,
+			   CLASS_1 | KEY_DEST_CLASS_REG);
+	set_jump_tgt_here(desc, key_jump_cmd);
+
+	/* Class 1 operation */
+	append_operation(desc, ctx->class1_alg_type |
+			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+	/* Load AES-GMAC ESP IV into Math1 register */
+	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_DECO_MATH1 |
+		   LDST_CLASS_DECO | tfm->ivsize);
+
+	/* Wait for the DMA transaction to finish */
+	append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM |
+		    (1 << JUMP_OFFSET_SHIFT));
+
+	/* Overwrite blank immediate AES-GMAC ESP IV data */
+	write_iv_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
+				   (tfm->ivsize << MOVE_LEN_SHIFT));
+
+	/* Overwrite blank immediate AAD data */
+	write_aad_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
+				    (tfm->ivsize << MOVE_LEN_SHIFT));
+
+	/* cryptlen = seqoutlen - authsize */
+	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
+
+	/* assoclen = (seqinlen - ivsize) - cryptlen */
+	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
+
+	/* Read Salt and AES-GMAC ESP IV */
+	append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
+		   FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | (4 + tfm->ivsize));
+	/* Append Salt */
+	append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4);
+	set_move_tgt_here(desc, write_iv_cmd);
+	/* Blank commands. Will be overwritten by AES-GMAC ESP IV. */
+	append_cmd(desc, 0x00000000);
+	append_cmd(desc, 0x00000000);
+	/* End of blank commands */
+
+	/* Read assoc data */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+			     FIFOLD_TYPE_AAD);
+
+	/* Will read cryptlen bytes */
+	append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+	/* Will write cryptlen bytes */
+	append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+	/*
+	 * MOVE_LEN opcode is not available in all SEC HW revisions,
+	 * thus need to do some magic, i.e. self-patch the descriptor
+	 * buffer.
+	 */
+	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
+				    (0x6 << MOVE_LEN_SHIFT));
+	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
+				     (0x8 << MOVE_LEN_SHIFT));
+
+	/* Authenticate AES-GMAC ESP IV */
+	append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
+		   FIFOLD_TYPE_AAD | tfm->ivsize);
+	set_move_tgt_here(desc, write_aad_cmd);
+	/* Blank commands. Will be overwritten by AES-GMAC ESP IV. */
+	append_cmd(desc, 0x00000000);
+	append_cmd(desc, 0x00000000);
+	/* End of blank commands */
+
+	/* Read and write cryptlen bytes */
+	aead_append_src_dst(desc, FIFOLD_TYPE_AAD);
+
+	set_move_tgt_here(desc, read_move_cmd);
+	set_move_tgt_here(desc, write_move_cmd);
+	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+	/* Move payload data to OFIFO */
+	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
+
+	/* Write ICV */
+	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
+			 LDST_SRCDST_BYTE_CONTEXT);
+
+	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
+					      desc_bytes(desc),
+					      DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
+		dev_err(jrdev, "unable to map shared descriptor\n");
+		return -ENOMEM;
+	}
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "rfc4543 enc shdesc@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
+		       desc_bytes(desc), 1);
+#endif
+
+	/*
+	 * Job Descriptor and Shared Descriptors
+	 * must all fit into the 64-word Descriptor h/w Buffer
+	 */
+	keys_fit_inline = false;
+	if (DESC_RFC4543_DEC_LEN + DESC_JOB_IO_LEN +
+	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
+		keys_fit_inline = true;
+
+	desc = ctx->sh_desc_dec;
+
+	init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+	/* Skip key loading if it is loaded due to sharing */
+	key_jump_cmd = append_jump(desc, JUMP_JSL |
+				   JUMP_TEST_ALL | JUMP_COND_SHRD);
+	if (keys_fit_inline)
+		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+	else
+		append_key(desc, ctx->key_dma, ctx->enckeylen,
+			   CLASS_1 | KEY_DEST_CLASS_REG);
+	set_jump_tgt_here(desc, key_jump_cmd);
+
+	/* Class 1 operation */
+	append_operation(desc, ctx->class1_alg_type |
+			 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+	/* Load AES-GMAC ESP IV into Math1 register */
+	append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_DECO_MATH1 |
+		   LDST_CLASS_DECO | tfm->ivsize);
+
+	/* Wait for the DMA transaction to finish */
+	append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM |
+		    (1 << JUMP_OFFSET_SHIFT));
+
+	/* assoclen + cryptlen = (seqinlen - ivsize) - icvsize */
+	append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, ctx->authsize);
+
+	/* Overwrite blank immediate AES-GMAC ESP IV data */
+	write_iv_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
+				   (tfm->ivsize << MOVE_LEN_SHIFT));
+
+	/* Overwrite blank immediate AAD data */
+	write_aad_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
+				    (tfm->ivsize << MOVE_LEN_SHIFT));
+
+	/* assoclen = (assoclen + cryptlen) - cryptlen */
+	append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+	append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
+
+	/*
+	 * MOVE_LEN opcode is not available in all SEC HW revisions,
+	 * thus need to do some magic, i.e. self-patch the descriptor
+	 * buffer.
+	 */
+	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
+				    (0x6 << MOVE_LEN_SHIFT));
+	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
+				     (0x8 << MOVE_LEN_SHIFT));
+
+	/* Read Salt and AES-GMAC ESP IV */
+	append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
+		   FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | (4 + tfm->ivsize));
+	/* Append Salt */
+	append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4);
+	set_move_tgt_here(desc, write_iv_cmd);
+	/* Blank commands. Will be overwritten by AES-GMAC ESP IV. */
+	append_cmd(desc, 0x00000000);
+	append_cmd(desc, 0x00000000);
+	/* End of blank commands */
+
+	/* Read assoc data */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+			     FIFOLD_TYPE_AAD);
+
+	/* Will read cryptlen bytes */
+	append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
+
+	/* Will write cryptlen bytes */
+	append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
+
+	/* Authenticate AES-GMAC ESP IV */
+	append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
+		   FIFOLD_TYPE_AAD | tfm->ivsize);
+	set_move_tgt_here(desc, write_aad_cmd);
+	/* Blank commands. Will be overwritten by AES-GMAC ESP IV. */
+	append_cmd(desc, 0x00000000);
+	append_cmd(desc, 0x00000000);
+	/* End of blank commands */
+
+	/* Store payload data */
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF);
+
+	/* In-snoop cryptlen data */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF |
+			     FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1);
+
+	set_move_tgt_here(desc, read_move_cmd);
+	set_move_tgt_here(desc, write_move_cmd);
+	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+	/* Move payload data to OFIFO */
+	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
+	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+	/* Read ICV */
+	append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 |
+			     FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1);
+
+	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
+					      desc_bytes(desc),
+					      DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
+		dev_err(jrdev, "unable to map shared descriptor\n");
+		return -ENOMEM;
+	}
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "rfc4543 dec shdesc@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
+		       desc_bytes(desc), 1);
+#endif
+
+	/*
+	 * Job Descriptor and Shared Descriptors
+	 * must all fit into the 64-word Descriptor h/w Buffer
+	 */
+	keys_fit_inline = false;
+	if (DESC_RFC4543_GIVENC_LEN + DESC_JOB_IO_LEN +
+	    ctx->enckeylen <= CAAM_DESC_BYTES_MAX)
+		keys_fit_inline = true;
+
+	/* rfc4543_givencrypt shared descriptor */
+	desc = ctx->sh_desc_givenc;
+
+	init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+	/* Skip key loading if it is loaded due to sharing */
+	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+				   JUMP_COND_SHRD);
+	if (keys_fit_inline)
+		append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+				  ctx->enckeylen, CLASS_1 | KEY_DEST_CLASS_REG);
+	else
+		append_key(desc, ctx->key_dma, ctx->enckeylen,
+			   CLASS_1 | KEY_DEST_CLASS_REG);
+	set_jump_tgt_here(desc, key_jump_cmd);
+
+	/* Generate IV */
+	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
+		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
+		NFIFOENTRY_PTYPE_RND | (tfm->ivsize << NFIFOENTRY_DLEN_SHIFT);
+	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
+			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
+	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+	/* Move generated IV to Math1 register */
+	append_move(desc, MOVE_SRC_INFIFO | MOVE_DEST_MATH1 |
+		    (tfm->ivsize << MOVE_LEN_SHIFT));
+	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+	/* Overwrite blank immediate AES-GMAC IV data */
+	write_iv_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
+				   (tfm->ivsize << MOVE_LEN_SHIFT));
+
+	/* Overwrite blank immediate AAD data */
+	write_aad_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF |
+				    (tfm->ivsize << MOVE_LEN_SHIFT));
+
+	/* Copy generated IV to OFIFO */
+	append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_OUTFIFO |
+		    (tfm->ivsize << MOVE_LEN_SHIFT));
+
+	/* Class 1 operation */
+	append_operation(desc, ctx->class1_alg_type |
+			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+	/* ivsize + cryptlen = seqoutlen - authsize */
+	append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
+
+	/* assoclen = seqinlen - (ivsize + cryptlen) */
+	append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
+
+	/* Will write ivsize + cryptlen */
+	append_math_add(desc, VARSEQOUTLEN, REG3, REG0, CAAM_CMD_SZ);
+
+	/*
+	 * MOVE_LEN opcode is not available in all SEC HW revisions,
+	 * thus need to do some magic, i.e. self-patch the descriptor
+	 * buffer.
+	 */
+	read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 |
+				    (0x6 << MOVE_LEN_SHIFT));
+	write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF |
+				     (0x8 << MOVE_LEN_SHIFT));
+
+	/* Read Salt and AES-GMAC generated IV */
+	append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
+		   FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | (4 + tfm->ivsize));
+	/* Append Salt */
+	append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4);
+	set_move_tgt_here(desc, write_iv_cmd);
+	/* Blank commands. Will be overwritten by AES-GMAC generated IV. */
+	append_cmd(desc, 0x00000000);
+	append_cmd(desc, 0x00000000);
+	/* End of blank commands */
+
+	/* No need to reload iv */
+	append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_SKIP);
+
+	/* Read assoc data */
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF |
+			     FIFOLD_TYPE_AAD);
+
+	/* Will read cryptlen */
+	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+
+	/* Authenticate AES-GMAC IV */
+	append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE |
+		   FIFOLD_TYPE_AAD | tfm->ivsize);
+	set_move_tgt_here(desc, write_aad_cmd);
+	/* Blank commands. Will be overwritten by AES-GMAC IV. */
+	append_cmd(desc, 0x00000000);
+	append_cmd(desc, 0x00000000);
+	/* End of blank commands */
+
+	/* Read and write cryptlen bytes */
+	aead_append_src_dst(desc, FIFOLD_TYPE_AAD);
+
+	set_move_tgt_here(desc, read_move_cmd);
+	set_move_tgt_here(desc, write_move_cmd);
+	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+	/* Move payload data to OFIFO */
+	append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO);
+
+	/* Write ICV */
+	append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB |
+			 LDST_SRCDST_BYTE_CONTEXT);
+
+	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
+						 desc_bytes(desc),
+						 DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
+		dev_err(jrdev, "unable to map shared descriptor\n");
+		return -ENOMEM;
+	}
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR,
+		       "rfc4543 givenc shdesc@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
+		       desc_bytes(desc), 1);
+#endif
+
+	return 0;
+}
+
+static int rfc4543_setauthsize(struct crypto_aead *authenc,
+			       unsigned int authsize)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+
+	ctx->authsize = authsize;
+	rfc4543_set_sh_desc(authenc);
+
+	return 0;
+}
+
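+/*
+ * Generate the MDHA split key for the authentication algorithm; the
+ * derivation is performed by the CAAM via a job submitted on ctx->jrdev
+ * and the result is written to ctx->key.
+ */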
+static u32 gen_split_aead_key(struct caam_ctx *ctx, const u8 *key_in,
+			      u32 authkeylen)
+{
+	return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len,
+			       ctx->split_key_pad_len, key_in, authkeylen,
+			       ctx->alg_op);
+}
+
+static int aead_setkey(struct crypto_aead *aead,
+			       const u8 *key, unsigned int keylen)
+{
+	/* Sizes for MDHA pads (*not* keys): MD5, SHA1, 224, 256, 384, 512 */
+	static const u8 mdpadlen[] = { 16, 20, 32, 32, 64, 64 };
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct device *jrdev = ctx->jrdev;
+	struct crypto_authenc_keys keys;
+	int ret = 0;
+
+	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
+		goto badkey;
+
+	/* Pick class 2 key length from algorithm submask */
+	ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >>
+				      OP_ALG_ALGSEL_SHIFT] * 2;
+	ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16);
+
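+	/* ctx->key: padded split key followed by the encryption key */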
+	if (ctx->split_key_pad_len + keys.enckeylen > CAAM_MAX_KEY_SIZE)
+		goto badkey;
+
+#ifdef DEBUG
+	printk(KERN_ERR "keylen %d enckeylen %d authkeylen %d\n",
+	       keys.authkeylen + keys.enckeylen, keys.enckeylen,
+	       keys.authkeylen);
+	printk(KERN_ERR "split_key_len %d split_key_pad_len %d\n",
+	       ctx->split_key_len, ctx->split_key_pad_len);
+	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
+	ret = gen_split_aead_key(ctx, keys.authkey, keys.authkeylen);
+	if (ret)
+		goto badkey;
+
+	/* append encryption key to auth split key */
+	memcpy(ctx->key + ctx->split_key_pad_len, keys.enckey, keys.enckeylen);
+
+	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len +
+				      keys.enckeylen, DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, ctx->key_dma)) {
+		dev_err(jrdev, "unable to map key i/o memory\n");
+		return -ENOMEM;
+	}
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
+		       ctx->split_key_pad_len + keys.enckeylen, 1);
+#endif
+
+	ctx->enckeylen = keys.enckeylen;
+
+	ret = aead_set_sh_desc(aead);
+	if (ret) {
+		dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len +
+				 keys.enckeylen, DMA_TO_DEVICE);
+	}
+
+	return ret;
+badkey:
+	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	return -EINVAL;
+}
+
+static int gcm_setkey(struct crypto_aead *aead,
+		      const u8 *key, unsigned int keylen)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct device *jrdev = ctx->jrdev;
+	int ret = 0;
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
+	memcpy(ctx->key, key, keylen);
+	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
+				      DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, ctx->key_dma)) {
+		dev_err(jrdev, "unable to map key i/o memory\n");
+		return -ENOMEM;
+	}
+	ctx->enckeylen = keylen;
+
+	ret = gcm_set_sh_desc(aead);
+	if (ret) {
+		dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
+				 DMA_TO_DEVICE);
+	}
+
+	return ret;
+}
+
+static int rfc4106_setkey(struct crypto_aead *aead,
+			  const u8 *key, unsigned int keylen)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct device *jrdev = ctx->jrdev;
+	int ret = 0;
+
+	if (keylen < 4)
+		return -EINVAL;
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
+	memcpy(ctx->key, key, keylen);
+
+	/*
+	 * The last four bytes of the key material are used as the salt value
+	 * in the nonce. Update the AES key length.
+	 */
+	ctx->enckeylen = keylen - 4;
+
+	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
+				      DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, ctx->key_dma)) {
+		dev_err(jrdev, "unable to map key i/o memory\n");
+		return -ENOMEM;
+	}
+
+	ret = rfc4106_set_sh_desc(aead);
+	if (ret) {
+		dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
+				 DMA_TO_DEVICE);
+	}
+
+	return ret;
+}
+
+static int rfc4543_setkey(struct crypto_aead *aead,
+			  const u8 *key, unsigned int keylen)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct device *jrdev = ctx->jrdev;
+	int ret = 0;
+
+	if (keylen < 4)
+		return -EINVAL;
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+
+	memcpy(ctx->key, key, keylen);
+
+	/*
+	 * The last four bytes of the key material are used as the salt value
+	 * in the nonce. Update the AES key length.
+	 */
+	ctx->enckeylen = keylen - 4;
+
+	ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->enckeylen,
+				      DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, ctx->key_dma)) {
+		dev_err(jrdev, "unable to map key i/o memory\n");
+		return -ENOMEM;
+	}
+
+	ret = rfc4543_set_sh_desc(aead);
+	if (ret) {
+		dma_unmap_single(jrdev, ctx->key_dma, ctx->enckeylen,
+				 DMA_TO_DEVICE);
+	}
+
+	return ret;
+}
+
+static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
+			     const u8 *key, unsigned int keylen)
+{
+	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+	struct ablkcipher_tfm *crt = &ablkcipher->base.crt_ablkcipher;
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
+	const char *alg_name = crypto_tfm_alg_name(tfm);
+	struct device *jrdev = ctx->jrdev;
+	int ret = 0;
+	u32 *key_jump_cmd;
+	u32 *desc;
+	u32 *nonce;
+	u32 geniv;
+	u32 ctx1_iv_off = 0;
+	const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
+			       OP_ALG_AAI_CTR_MOD128);
+	const bool is_rfc3686 = (ctr_mode &&
+				 (strstr(alg_name, "rfc3686") != NULL));
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+#endif
+	/*
+	 * AES-CTR needs to load IV in CONTEXT1 reg
+	 * at an offset of 128bits (16bytes)
+	 * CONTEXT1[255:128] = IV
+	 */
+	if (ctr_mode)
+		ctx1_iv_off = 16;
+
+	/*
+	 * RFC3686 specific:
+	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
+	 *	| *key = {KEY, NONCE}
+	 */
+	if (is_rfc3686) {
+		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
+		keylen -= CTR_RFC3686_NONCE_SIZE;
+	}
+
+	memcpy(ctx->key, key, keylen);
+	ctx->key_dma = dma_map_single(jrdev, ctx->key, keylen,
+				      DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, ctx->key_dma)) {
+		dev_err(jrdev, "unable to map key i/o memory\n");
+		return -ENOMEM;
+	}
+	ctx->enckeylen = keylen;
+
+	/* ablkcipher_encrypt shared descriptor */
+	desc = ctx->sh_desc_enc;
+	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+	/* Skip if already shared */
+	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+				   JUMP_COND_SHRD);
+
+	/* Load class1 key only */
+	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+			  ctx->enckeylen, CLASS_1 |
+			  KEY_DEST_CLASS_REG);
+
+	/* Load nonce into CONTEXT1 reg */
+	if (is_rfc3686) {
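+		/* The 4-byte nonce follows the AES key in the key material */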
+		nonce = (u32 *)(key + keylen);
+		append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
+				    LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+		append_move(desc, MOVE_WAITCOMP |
+			    MOVE_SRC_OUTFIFO |
+			    MOVE_DEST_CLASS1CTX |
+			    (16 << MOVE_OFFSET_SHIFT) |
+			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
+	}
+
+	set_jump_tgt_here(desc, key_jump_cmd);
+
+	/* Load IV */
+	append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
+			LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
+
+	/* Load counter into CONTEXT1 reg */
+	if (is_rfc3686)
+		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
+				    LDST_CLASS_1_CCB |
+				    LDST_SRCDST_BYTE_CONTEXT |
+				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+				     LDST_OFFSET_SHIFT));
+
+	/* Load operation */
+	append_operation(desc, ctx->class1_alg_type |
+			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+	/* Perform operation */
+	ablkcipher_append_src_dst(desc);
+
+	ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
+					      desc_bytes(desc),
+					      DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
+		dev_err(jrdev, "unable to map shared descriptor\n");
+		return -ENOMEM;
+	}
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR,
+		       "ablkcipher enc shdesc@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
+		       desc_bytes(desc), 1);
+#endif
+	/* ablkcipher_decrypt shared descriptor */
+	desc = ctx->sh_desc_dec;
+
+	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+	/* Skip if already shared */
+	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+				   JUMP_COND_SHRD);
+
+	/* Load class1 key only */
+	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+			  ctx->enckeylen, CLASS_1 |
+			  KEY_DEST_CLASS_REG);
+
+	/* Load nonce into CONTEXT1 reg */
+	if (is_rfc3686) {
+		nonce = (u32 *)(key + keylen);
+		append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
+				    LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+		append_move(desc, MOVE_WAITCOMP |
+			    MOVE_SRC_OUTFIFO |
+			    MOVE_DEST_CLASS1CTX |
+			    (16 << MOVE_OFFSET_SHIFT) |
+			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
+	}
+
+	set_jump_tgt_here(desc, key_jump_cmd);
+
+	/* Load IV */
+	append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT |
+			LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
+
+	/* Load counter into CONTEXT1 reg */
+	if (is_rfc3686)
+		append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM |
+				    LDST_CLASS_1_CCB |
+				    LDST_SRCDST_BYTE_CONTEXT |
+				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+				     LDST_OFFSET_SHIFT));
+
+	/* Choose operation */
+	if (ctr_mode)
+		append_operation(desc, ctx->class1_alg_type |
+				 OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT);
+	else
+		append_dec_op1(desc, ctx->class1_alg_type);
+
+	/* Perform operation */
+	ablkcipher_append_src_dst(desc);
+
+	ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
+					      desc_bytes(desc),
+					      DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
+		dev_err(jrdev, "unable to map shared descriptor\n");
+		return -ENOMEM;
+	}
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR,
+		       "ablkcipher dec shdesc@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
+		       desc_bytes(desc), 1);
+#endif
+	/* ablkcipher_givencrypt shared descriptor */
+	desc = ctx->sh_desc_givenc;
+
+	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+	/* Skip if already shared */
+	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+				   JUMP_COND_SHRD);
+
+	/* Load class1 key only */
+	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+			  ctx->enckeylen, CLASS_1 |
+			  KEY_DEST_CLASS_REG);
+
+	/* Load Nonce into CONTEXT1 reg */
+	if (is_rfc3686) {
+		nonce = (u32 *)(key + keylen);
+		append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
+				    LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+		append_move(desc, MOVE_WAITCOMP |
+			    MOVE_SRC_OUTFIFO |
+			    MOVE_DEST_CLASS1CTX |
+			    (16 << MOVE_OFFSET_SHIFT) |
+			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
+	}
+	set_jump_tgt_here(desc, key_jump_cmd);
+
+	/* Generate IV */
+	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
+		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
+		NFIFOENTRY_PTYPE_RND | (crt->ivsize << NFIFOENTRY_DLEN_SHIFT);
+	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
+			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
+	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+	append_move(desc, MOVE_WAITCOMP |
+		    MOVE_SRC_INFIFO |
+		    MOVE_DEST_CLASS1CTX |
+		    (crt->ivsize << MOVE_LEN_SHIFT) |
+		    (ctx1_iv_off << MOVE_OFFSET_SHIFT));
+	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+	/* Copy generated IV to memory */
+	append_seq_store(desc, crt->ivsize,
+			 LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
+			 (ctx1_iv_off << LDST_OFFSET_SHIFT));
+
+	/* Load Counter into CONTEXT1 reg */
+	if (is_rfc3686)
+		append_load_imm_u32(desc, (u32)1, LDST_IMM |
+				    LDST_CLASS_1_CCB |
+				    LDST_SRCDST_BYTE_CONTEXT |
+				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+				     LDST_OFFSET_SHIFT));
+
+	if (ctx1_iv_off)
+		append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
+			    (1 << JUMP_OFFSET_SHIFT));
+
+	/* Load operation */
+	append_operation(desc, ctx->class1_alg_type |
+			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+	/* Perform operation */
+	ablkcipher_append_src_dst(desc);
+
+	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
+						 desc_bytes(desc),
+						 DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
+		dev_err(jrdev, "unable to map shared descriptor\n");
+		return -ENOMEM;
+	}
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR,
+		       "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
+		       desc_bytes(desc), 1);
+#endif
+
+	return ret;
+}
+
+/*
+ * aead_edesc - s/w-extended aead descriptor
+ * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
+ * @assoc_chained: if associated data is chained
+ * @src_nents: number of segments in input scatterlist
+ * @src_chained: if source is chained
+ * @dst_nents: number of segments in output scatterlist
+ * @dst_chained: if destination is chained
+ * @iv_dma: dma address of iv for checking continuity and link table
+ * @sec4_sg_bytes: length of dma mapped sec4_sg space
+ * @sec4_sg_dma: bus physical mapped address of h/w link table
+ * @hw_desc: the h/w job descriptor followed by any referenced link tables
+ *	     (variable length; must not exceed MAX_CAAM_DESCSIZE)
+ */
+struct aead_edesc {
+	int assoc_nents;
+	bool assoc_chained;
+	int src_nents;
+	bool src_chained;
+	int dst_nents;
+	bool dst_chained;
+	dma_addr_t iv_dma;
+	int sec4_sg_bytes;
+	dma_addr_t sec4_sg_dma;
+	struct sec4_sg_entry *sec4_sg;
+	u32 hw_desc[0];
+};
+
+/*
+ * ablkcipher_edesc - s/w-extended ablkcipher descriptor
+ * @src_nents: number of segments in input scatterlist
+ * @src_chained: if source is chained
+ * @dst_nents: number of segments in output scatterlist
+ * @dst_chained: if destination is chained
+ * @iv_dma: dma address of iv for checking continuity and link table
+ * @sec4_sg_bytes: length of dma mapped sec4_sg space
+ * @sec4_sg_dma: bus physical mapped address of h/w link table
+ * @hw_desc: the h/w job descriptor followed by any referenced link tables
+ *	     (variable length; must not exceed MAX_CAAM_DESCSIZE)
+ */
+struct ablkcipher_edesc {
+	int src_nents;
+	bool src_chained;
+	int dst_nents;
+	bool dst_chained;
+	dma_addr_t iv_dma;
+	int sec4_sg_bytes;
+	dma_addr_t sec4_sg_dma;
+	struct sec4_sg_entry *sec4_sg;
+	u32 hw_desc[0];
+};
+
+static void caam_unmap(struct device *dev, struct scatterlist *src,
+		       struct scatterlist *dst, int src_nents,
+		       bool src_chained, int dst_nents, bool dst_chained,
+		       dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
+		       int sec4_sg_bytes)
+{
 	if (dst != src) {
 		dma_unmap_sg_chained(dev, src, src_nents ? : 1, DMA_TO_DEVICE,
 				     src_chained);
@@ -1088,6 +2355,7 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
 	u32 out_options = 0, in_options;
 	dma_addr_t dst_dma, src_dma;
 	int len, sec4_sg_index = 0;
+	bool is_gcm = false;
 
 #ifdef DEBUG
 	debug("assoclen %d cryptlen %d authsize %d\n",
@@ -1106,11 +2374,19 @@ static void init_aead_job(u32 *sh_desc, dma_addr_t ptr,
 		       desc_bytes(sh_desc), 1);
 #endif
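+	/* For GCM the input sequence starts with the IV, not the AAD */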
 
+	if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
+	      OP_ALG_ALGSEL_AES) &&
+	    ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
+		is_gcm = true;
+
 	len = desc_len(sh_desc);
 	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
 
 	if (all_contig) {
-		src_dma = sg_dma_address(req->assoc);
+		if (is_gcm)
+			src_dma = edesc->iv_dma;
+		else
+			src_dma = sg_dma_address(req->assoc);
 		in_options = 0;
 	} else {
 		src_dma = edesc->sec4_sg_dma;
@@ -1164,6 +2440,7 @@ static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
 	u32 out_options = 0, in_options;
 	dma_addr_t dst_dma, src_dma;
 	int len, sec4_sg_index = 0;
+	bool is_gcm = false;
 
 #ifdef DEBUG
 	debug("assoclen %d cryptlen %d authsize %d\n",
@@ -1181,11 +2458,19 @@ static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
 		       desc_bytes(sh_desc), 1);
 #endif
 
+	if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
+	      OP_ALG_ALGSEL_AES) &&
+	    ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
+		is_gcm = true;
+
 	len = desc_len(sh_desc);
 	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
 
 	if (contig & GIV_SRC_CONTIG) {
-		src_dma = sg_dma_address(req->assoc);
+		if (is_gcm)
+			src_dma = edesc->iv_dma;
+		else
+			src_dma = sg_dma_address(req->assoc);
 		in_options = 0;
 	} else {
 		src_dma = edesc->sec4_sg_dma;
@@ -1200,7 +2485,8 @@ static void init_aead_giv_job(u32 *sh_desc, dma_addr_t ptr,
 	} else {
 		if (likely(req->src == req->dst)) {
 			dst_dma = src_dma + sizeof(struct sec4_sg_entry) *
-				  edesc->assoc_nents;
+				  (edesc->assoc_nents +
+				   (is_gcm ? 1 + edesc->src_nents : 0));
 			out_options = LDST_SGF;
 		} else {
 			dst_dma = edesc->sec4_sg_dma +
@@ -1271,6 +2557,54 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
 	append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
 }
 
+/*
+ * Fill in ablkcipher givencrypt job descriptor
+ */
+static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
+				    struct ablkcipher_edesc *edesc,
+				    struct ablkcipher_request *req,
+				    bool iv_contig)
+{
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+	u32 *desc = edesc->hw_desc;
+	u32 out_options, in_options;
+	dma_addr_t dst_dma, src_dma;
+	int len, sec4_sg_index = 0;
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+		       ivsize, 1);
+	print_hex_dump(KERN_ERR, "src    @" __stringify(__LINE__) ": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+		       edesc->src_nents ? 100 : req->nbytes, 1);
+#endif
+
+	len = desc_len(sh_desc);
+	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
+
+	if (!edesc->src_nents) {
+		src_dma = sg_dma_address(req->src);
+		in_options = 0;
+	} else {
+		src_dma = edesc->sec4_sg_dma;
+		sec4_sg_index += edesc->src_nents;
+		in_options = LDST_SGF;
+	}
+	append_seq_in_ptr(desc, src_dma, req->nbytes, in_options);
+
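+	/* Output sequence: generated IV followed by the ciphertext */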
+	if (iv_contig) {
+		dst_dma = edesc->iv_dma;
+		out_options = 0;
+	} else {
+		dst_dma = edesc->sec4_sg_dma +
+			  sec4_sg_index * sizeof(struct sec4_sg_entry);
+		out_options = LDST_SGF;
+	}
+	append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, out_options);
+}
+
 /*
  * allocate and map the aead extended descriptor
  */
@@ -1292,6 +2626,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 	int ivsize = crypto_aead_ivsize(aead);
 	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
 	unsigned int authsize = ctx->authsize;
+	bool is_gcm = false;
 
 	assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
 
@@ -1326,15 +2661,31 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 		return ERR_PTR(-ENOMEM);
 	}
 
-	/* Check if data are contiguous */
-	if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
-	    iv_dma || src_nents || iv_dma + ivsize !=
-	    sg_dma_address(req->src)) {
-		all_contig = false;
+	if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
+	      OP_ALG_ALGSEL_AES) &&
+	    ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
+		is_gcm = true;
+
+	/*
+	 * Check if data are contiguous.
+	 * GCM expected input sequence: IV, AAD, text
+	 * All other modes - expected input sequence: AAD, IV, text
+	 */
+	if (is_gcm)
+		all_contig = (!assoc_nents &&
+			      iv_dma + ivsize == sg_dma_address(req->assoc) &&
+			      !src_nents && sg_dma_address(req->assoc) +
+			      req->assoclen == sg_dma_address(req->src));
+	else
+		all_contig = (!assoc_nents && sg_dma_address(req->assoc) +
+			      req->assoclen == iv_dma && !src_nents &&
+			      iv_dma + ivsize == sg_dma_address(req->src));
+	if (!all_contig) {
 		assoc_nents = assoc_nents ? : 1;
 		src_nents = src_nents ? : 1;
 		sec4_sg_len = assoc_nents + 1 + src_nents;
 	}
+
 	sec4_sg_len += dst_nents;
 
 	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
@@ -1361,14 +2712,26 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 
 	sec4_sg_index = 0;
 	if (!all_contig) {
-		sg_to_sec4_sg(req->assoc,
-			      (assoc_nents ? : 1),
-			      edesc->sec4_sg +
-			      sec4_sg_index, 0);
-		sec4_sg_index += assoc_nents ? : 1;
+		if (!is_gcm) {
+			sg_to_sec4_sg(req->assoc,
+				      (assoc_nents ? : 1),
+				      edesc->sec4_sg +
+				      sec4_sg_index, 0);
+			sec4_sg_index += assoc_nents ? : 1;
+		}
+
 		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
 				   iv_dma, ivsize, 0);
 		sec4_sg_index += 1;
+
+		if (is_gcm) {
+			sg_to_sec4_sg(req->assoc,
+				      (assoc_nents ? : 1),
+				      edesc->sec4_sg +
+				      sec4_sg_index, 0);
+			sec4_sg_index += assoc_nents ? : 1;
+		}
+
 		sg_to_sec4_sg_last(req->src,
 				   (src_nents ? : 1),
 				   edesc->sec4_sg +
@@ -1490,6 +2853,7 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
 	int ivsize = crypto_aead_ivsize(aead);
 	bool assoc_chained = false, src_chained = false, dst_chained = false;
 	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
+	bool is_gcm = false;
 
 	assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
 	src_nents = sg_count(req->src, req->cryptlen, &src_chained);
@@ -1516,24 +2880,53 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
 		return ERR_PTR(-ENOMEM);
 	}
 
-	/* Check if data are contiguous */
-	if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
-	    iv_dma || src_nents || iv_dma + ivsize != sg_dma_address(req->src))
-		contig &= ~GIV_SRC_CONTIG;
+	if (((ctx->class1_alg_type & OP_ALG_ALGSEL_MASK) ==
+	      OP_ALG_ALGSEL_AES) &&
+	    ((ctx->class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_GCM))
+		is_gcm = true;
+
+	/*
+	 * Check if data are contiguous.
+	 * GCM expected input sequence: IV, AAD, text
+	 * All other modes - expected input sequence: AAD, IV, text
+	 */
+
+	if (is_gcm) {
+		if (assoc_nents || iv_dma + ivsize !=
+		    sg_dma_address(req->assoc) || src_nents ||
+		    sg_dma_address(req->assoc) + req->assoclen !=
+		    sg_dma_address(req->src))
+			contig &= ~GIV_SRC_CONTIG;
+	} else {
+		if (assoc_nents ||
+		    sg_dma_address(req->assoc) + req->assoclen != iv_dma ||
+		    src_nents || iv_dma + ivsize != sg_dma_address(req->src))
+			contig &= ~GIV_SRC_CONTIG;
+	}
+
 	if (dst_nents || iv_dma + ivsize != sg_dma_address(req->dst))
 		contig &= ~GIV_DST_CONTIG;
-	if (unlikely(req->src != req->dst)) {
-		dst_nents = dst_nents ? : 1;
-		sec4_sg_len += 1;
-	}
+
 	if (!(contig & GIV_SRC_CONTIG)) {
 		assoc_nents = assoc_nents ? : 1;
 		src_nents = src_nents ? : 1;
 		sec4_sg_len += assoc_nents + 1 + src_nents;
-		if (likely(req->src == req->dst))
+		if (req->src == req->dst &&
+		    (src_nents || iv_dma + ivsize != sg_dma_address(req->src)))
 			contig &= ~GIV_DST_CONTIG;
 	}
-	sec4_sg_len += dst_nents;
+
+	/*
+	 * Add new sg entries for GCM output sequence.
+	 * Expected output sequence: IV, encrypted text.
+	 */
+	if (is_gcm && req->src == req->dst && !(contig & GIV_DST_CONTIG))
+		sec4_sg_len += 1 + src_nents;
+
+	if (unlikely(req->src != req->dst)) {
+		dst_nents = dst_nents ? : 1;
+		sec4_sg_len += 1 + dst_nents;
+	}
 
 	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
 
@@ -1559,18 +2952,36 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
 
 	sec4_sg_index = 0;
 	if (!(contig & GIV_SRC_CONTIG)) {
-		sg_to_sec4_sg(req->assoc, assoc_nents,
-			      edesc->sec4_sg +
-			      sec4_sg_index, 0);
-		sec4_sg_index += assoc_nents;
+		if (!is_gcm) {
+			sg_to_sec4_sg(req->assoc, assoc_nents,
+				      edesc->sec4_sg + sec4_sg_index, 0);
+			sec4_sg_index += assoc_nents;
+		}
+
 		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
 				   iv_dma, ivsize, 0);
 		sec4_sg_index += 1;
+
+		if (is_gcm) {
+			sg_to_sec4_sg(req->assoc, assoc_nents,
+				      edesc->sec4_sg + sec4_sg_index, 0);
+			sec4_sg_index += assoc_nents;
+		}
+
 		sg_to_sec4_sg_last(req->src, src_nents,
 				   edesc->sec4_sg +
 				   sec4_sg_index, 0);
 		sec4_sg_index += src_nents;
 	}
+
+	if (is_gcm && req->src == req->dst && !(contig & GIV_DST_CONTIG)) {
+		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
+				   iv_dma, ivsize, 0);
+		sec4_sg_index += 1;
+		sg_to_sec4_sg_last(req->src, src_nents,
+				   edesc->sec4_sg + sec4_sg_index, 0);
+	}
+
 	if (unlikely(req->src != req->dst && !(contig & GIV_DST_CONTIG))) {
 		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
 				   iv_dma, ivsize, 0);
@@ -1595,56 +3006,235 @@ static int aead_givencrypt(struct aead_givcrypt_request *areq)
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
 	struct caam_ctx *ctx = crypto_aead_ctx(aead);
 	struct device *jrdev = ctx->jrdev;
-	u32 contig;
+	u32 contig;
+	u32 *desc;
+	int ret = 0;
+
+	/* allocate extended descriptor */
+	edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
+				     CAAM_CMD_SZ, &contig);
+
+	if (IS_ERR(edesc))
+		return PTR_ERR(edesc);
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "giv src@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+		       req->cryptlen, 1);
+#endif
+
+	/* Create and submit job descriptor */
+	init_aead_giv_job(ctx->sh_desc_givenc,
+			  ctx->sh_desc_givenc_dma, edesc, req, contig);
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+		       desc_bytes(edesc->hw_desc), 1);
+#endif
+
+	desc = edesc->hw_desc;
+	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
+	if (!ret) {
+		ret = -EINPROGRESS;
+	} else {
+		aead_unmap(jrdev, edesc, req);
+		kfree(edesc);
+	}
+
+	return ret;
+}
+
+static int aead_null_givencrypt(struct aead_givcrypt_request *areq)
+{
+	return aead_encrypt(&areq->areq);
+}
+
+/*
+ * allocate and map the ablkcipher extended descriptor for ablkcipher
+ */
+static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
+						       *req, int desc_bytes,
+						       bool *iv_contig_out)
+{
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+	struct device *jrdev = ctx->jrdev;
+	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+					  CRYPTO_TFM_REQ_MAY_SLEEP)) ?
+		       GFP_KERNEL : GFP_ATOMIC;
+	int src_nents, dst_nents = 0, sec4_sg_bytes;
+	struct ablkcipher_edesc *edesc;
+	dma_addr_t iv_dma = 0;
+	bool iv_contig = false;
+	int sgc;
+	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+	bool src_chained = false, dst_chained = false;
+	int sec4_sg_index;
+
+	src_nents = sg_count(req->src, req->nbytes, &src_chained);
+
+	if (req->dst != req->src)
+		dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);
+
+	if (likely(req->src == req->dst)) {
+		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
+					 DMA_BIDIRECTIONAL, src_chained);
+	} else {
+		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
+					 DMA_TO_DEVICE, src_chained);
+		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
+					 DMA_FROM_DEVICE, dst_chained);
+	}
+
+	iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, iv_dma)) {
+		dev_err(jrdev, "unable to map IV\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	/*
+	 * Check if iv can be contiguous with source and destination.
+	 * If so, include it. If not, create scatterlist.
+	 */
+	if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
+		iv_contig = true;
+	else
+		src_nents = src_nents ? : 1;
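+	/* Link table: [IV entry] + source entries + destination entries */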
+	sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
+			sizeof(struct sec4_sg_entry);
+
+	/* allocate space for base edesc and hw desc commands, link tables */
+	edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes +
+			sec4_sg_bytes, GFP_DMA | flags);
+	if (!edesc) {
+		dev_err(jrdev, "could not allocate extended descriptor\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	edesc->src_nents = src_nents;
+	edesc->src_chained = src_chained;
+	edesc->dst_nents = dst_nents;
+	edesc->dst_chained = dst_chained;
+	edesc->sec4_sg_bytes = sec4_sg_bytes;
+	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
+			 desc_bytes;
+
+	sec4_sg_index = 0;
+	if (!iv_contig) {
+		dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
+		sg_to_sec4_sg_last(req->src, src_nents,
+				   edesc->sec4_sg + 1, 0);
+		sec4_sg_index += 1 + src_nents;
+	}
+
+	if (dst_nents) {
+		sg_to_sec4_sg_last(req->dst, dst_nents,
+			edesc->sec4_sg + sec4_sg_index, 0);
+	}
+
+	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+					    sec4_sg_bytes, DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+		dev_err(jrdev, "unable to map S/G table\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	edesc->iv_dma = iv_dma;
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
+		       sec4_sg_bytes, 1);
+#endif
+
+	*iv_contig_out = iv_contig;
+	return edesc;
+}
+
+static int ablkcipher_encrypt(struct ablkcipher_request *req)
+{
+	struct ablkcipher_edesc *edesc;
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+	struct device *jrdev = ctx->jrdev;
+	bool iv_contig;
+	u32 *desc;
+	int ret = 0;
+
+	/* allocate extended descriptor */
+	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
+				       CAAM_CMD_SZ, &iv_contig);
+	if (IS_ERR(edesc))
+		return PTR_ERR(edesc);
+
+	/* Create and submit job descriptor */
+	init_ablkcipher_job(ctx->sh_desc_enc,
+		ctx->sh_desc_enc_dma, edesc, req, iv_contig);
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+		       desc_bytes(edesc->hw_desc), 1);
+#endif
+	desc = edesc->hw_desc;
+	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
+
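+	/* On success the request completes asynchronously */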
+	if (!ret) {
+		ret = -EINPROGRESS;
+	} else {
+		ablkcipher_unmap(jrdev, edesc, req);
+		kfree(edesc);
+	}
+
+	return ret;
+}
+
+static int ablkcipher_decrypt(struct ablkcipher_request *req)
+{
+	struct ablkcipher_edesc *edesc;
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+	struct device *jrdev = ctx->jrdev;
+	bool iv_contig;
 	u32 *desc;
 	int ret = 0;
 
 	/* allocate extended descriptor */
-	edesc = aead_giv_edesc_alloc(areq, DESC_JOB_IO_LEN *
-				     CAAM_CMD_SZ, &contig);
-
+	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
+				       CAAM_CMD_SZ, &iv_contig);
 	if (IS_ERR(edesc))
 		return PTR_ERR(edesc);
 
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "giv src@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
-		       req->cryptlen, 1);
-#endif
-
 	/* Create and submit job descriptor*/
-	init_aead_giv_job(ctx->sh_desc_givenc,
-			  ctx->sh_desc_givenc_dma, edesc, req, contig);
+	init_ablkcipher_job(ctx->sh_desc_dec,
+		ctx->sh_desc_dec_dma, edesc, req, iv_contig);
+	desc = edesc->hw_desc;
 #ifdef DEBUG
-	print_hex_dump(KERN_ERR, "aead jobdesc@"__stringify(__LINE__)": ",
+	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
 		       desc_bytes(edesc->hw_desc), 1);
 #endif
 
-	desc = edesc->hw_desc;
-	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
+	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
 	if (!ret) {
 		ret = -EINPROGRESS;
 	} else {
-		aead_unmap(jrdev, edesc, req);
+		ablkcipher_unmap(jrdev, edesc, req);
 		kfree(edesc);
 	}
 
 	return ret;
 }
 
-static int aead_null_givencrypt(struct aead_givcrypt_request *areq)
-{
-	return aead_encrypt(&areq->areq);
-}
-
 /*
- * allocate and map the ablkcipher extended descriptor for ablkcipher
+ * allocate and map the ablkcipher extended descriptor
+ * for ablkcipher givencrypt
  */
-static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
-						       *req, int desc_bytes,
-						       bool *iv_contig_out)
+static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
+				struct skcipher_givcrypt_request *greq,
+				int desc_bytes,
+				bool *iv_contig_out)
 {
+	struct ablkcipher_request *req = &greq->creq;
 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
 	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
 	struct device *jrdev = ctx->jrdev;
@@ -1662,7 +3252,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
 
 	src_nents = sg_count(req->src, req->nbytes, &src_chained);
 
-	if (req->dst != req->src)
+	if (unlikely(req->dst != req->src))
 		dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);
 
 	if (likely(req->src == req->dst)) {
@@ -1675,25 +3265,25 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
 					 DMA_FROM_DEVICE, dst_chained);
 	}
 
-	iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
+	/*
+	 * Check if iv can be contiguous with source and destination.
+	 * If so, include it. If not, create scatterlist.
+	 */
+	iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
 	if (dma_mapping_error(jrdev, iv_dma)) {
 		dev_err(jrdev, "unable to map IV\n");
 		return ERR_PTR(-ENOMEM);
 	}
 
-	/*
-	 * Check if iv can be contiguous with source and destination.
-	 * If so, include it. If not, create scatterlist.
-	 */
-	if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
+	if (!dst_nents && iv_dma + ivsize == sg_dma_address(req->dst))
 		iv_contig = true;
 	else
-		src_nents = src_nents ? : 1;
+		dst_nents = dst_nents ? : 1;
 	sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
 			sizeof(struct sec4_sg_entry);
 
 	/* allocate space for base edesc and hw desc commands, link tables */
-	edesc = kmalloc(sizeof(struct ablkcipher_edesc) + desc_bytes +
+	edesc = kmalloc(sizeof(*edesc) + desc_bytes +
 			sec4_sg_bytes, GFP_DMA | flags);
 	if (!edesc) {
 		dev_err(jrdev, "could not allocate extended descriptor\n");
@@ -1709,16 +3299,17 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
 			 desc_bytes;
 
 	sec4_sg_index = 0;
-	if (!iv_contig) {
-		dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
-		sg_to_sec4_sg_last(req->src, src_nents,
-				   edesc->sec4_sg + 1, 0);
-		sec4_sg_index += 1 + src_nents;
+	if (src_nents) {
+		sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
+		sec4_sg_index += src_nents;
 	}
 
-	if (dst_nents) {
+	if (!iv_contig) {
+		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
+				   iv_dma, ivsize, 0);
+		sec4_sg_index += 1;
 		sg_to_sec4_sg_last(req->dst, dst_nents,
-			edesc->sec4_sg + sec4_sg_index, 0);
+				   edesc->sec4_sg + sec4_sg_index, 0);
 	}
 
 	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
@@ -1727,11 +3318,11 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
 		dev_err(jrdev, "unable to map S/G table\n");
 		return ERR_PTR(-ENOMEM);
 	}
-
 	edesc->iv_dma = iv_dma;
 
 #ifdef DEBUG
-	print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
+	print_hex_dump(KERN_ERR,
+		       "ablkcipher sec4_sg@" __stringify(__LINE__) ": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
 		       sec4_sg_bytes, 1);
 #endif
@@ -1740,8 +3331,9 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
 	return edesc;
 }
 
-static int ablkcipher_encrypt(struct ablkcipher_request *req)
+static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
 {
+	struct ablkcipher_request *req = &creq->creq;
 	struct ablkcipher_edesc *edesc;
 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
 	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
@@ -1751,16 +3343,17 @@ static int ablkcipher_encrypt(struct ablkcipher_request *req)
 	int ret = 0;
 
 	/* allocate extended descriptor */
-	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
+	edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN *
 				       CAAM_CMD_SZ, &iv_contig);
 	if (IS_ERR(edesc))
 		return PTR_ERR(edesc);
 
 	/* Create and submit job descriptor*/
-	init_ablkcipher_job(ctx->sh_desc_enc,
-		ctx->sh_desc_enc_dma, edesc, req, iv_contig);
+	init_ablkcipher_giv_job(ctx->sh_desc_givenc, ctx->sh_desc_givenc_dma,
+				edesc, req, iv_contig);
 #ifdef DEBUG
-	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
+	print_hex_dump(KERN_ERR,
+		       "ablkcipher jobdesc@" __stringify(__LINE__) ": ",
 		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
 		       desc_bytes(edesc->hw_desc), 1);
 #endif
@@ -1777,43 +3370,6 @@ static int ablkcipher_encrypt(struct ablkcipher_request *req)
 	return ret;
 }
 
-static int ablkcipher_decrypt(struct ablkcipher_request *req)
-{
-	struct ablkcipher_edesc *edesc;
-	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
-	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
-	struct device *jrdev = ctx->jrdev;
-	bool iv_contig;
-	u32 *desc;
-	int ret = 0;
-
-	/* allocate extended descriptor */
-	edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN *
-				       CAAM_CMD_SZ, &iv_contig);
-	if (IS_ERR(edesc))
-		return PTR_ERR(edesc);
-
-	/* Create and submit job descriptor*/
-	init_ablkcipher_job(ctx->sh_desc_dec,
-		ctx->sh_desc_dec_dma, edesc, req, iv_contig);
-	desc = edesc->hw_desc;
-#ifdef DEBUG
-	print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
-		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
-		       desc_bytes(edesc->hw_desc), 1);
-#endif
-
-	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
-	if (!ret) {
-		ret = -EINPROGRESS;
-	} else {
-		ablkcipher_unmap(jrdev, edesc, req);
-		kfree(edesc);
-	}
-
-	return ret;
-}
-
 #define template_aead		template_u.aead
 #define template_ablkcipher	template_u.ablkcipher
 struct caam_alg_template {
@@ -2309,17 +3865,188 @@ static struct caam_alg_template driver_algs[] = {
 				   OP_ALG_AAI_HMAC_PRECOMP,
 		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
 	},
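+	/* authenc AEADs using RFC3686 AES-CTR for encryption */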
+	{
+		.name = "authenc(hmac(md5),rfc3686(ctr(aes)))",
+		.driver_name = "authenc-hmac-md5-rfc3686-ctr-aes-caam",
+		.blocksize = 1,
+		.type = CRYPTO_ALG_TYPE_AEAD,
+		.template_aead = {
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.givencrypt = aead_givencrypt,
+			.geniv = "<built-in>",
+			.ivsize = CTR_RFC3686_IV_SIZE,
+			.maxauthsize = MD5_DIGEST_SIZE,
+			},
+		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+		.class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
+		.alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+	},
+	{
+		.name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
+		.driver_name = "authenc-hmac-sha1-rfc3686-ctr-aes-caam",
+		.blocksize = 1,
+		.type = CRYPTO_ALG_TYPE_AEAD,
+		.template_aead = {
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.givencrypt = aead_givencrypt,
+			.geniv = "<built-in>",
+			.ivsize = CTR_RFC3686_IV_SIZE,
+			.maxauthsize = SHA1_DIGEST_SIZE,
+			},
+		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+		.class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
+		.alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+	},
+	{
+		.name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
+		.driver_name = "authenc-hmac-sha224-rfc3686-ctr-aes-caam",
+		.blocksize = 1,
+		.type = CRYPTO_ALG_TYPE_AEAD,
+		.template_aead = {
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.givencrypt = aead_givencrypt,
+			.geniv = "<built-in>",
+			.ivsize = CTR_RFC3686_IV_SIZE,
+			.maxauthsize = SHA224_DIGEST_SIZE,
+			},
+		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+		.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+				   OP_ALG_AAI_HMAC_PRECOMP,
+		.alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+	},
+	{
+		.name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
+		.driver_name = "authenc-hmac-sha256-rfc3686-ctr-aes-caam",
+		.blocksize = 1,
+		.type = CRYPTO_ALG_TYPE_AEAD,
+		.template_aead = {
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.givencrypt = aead_givencrypt,
+			.geniv = "<built-in>",
+			.ivsize = CTR_RFC3686_IV_SIZE,
+			.maxauthsize = SHA256_DIGEST_SIZE,
+			},
+		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+		.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+				   OP_ALG_AAI_HMAC_PRECOMP,
+		.alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+	},
+	{
+		.name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
+		.driver_name = "authenc-hmac-sha384-rfc3686-ctr-aes-caam",
+		.blocksize = 1,
+		.type = CRYPTO_ALG_TYPE_AEAD,
+		.template_aead = {
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.givencrypt = aead_givencrypt,
+			.geniv = "<built-in>",
+			.ivsize = CTR_RFC3686_IV_SIZE,
+			.maxauthsize = SHA384_DIGEST_SIZE,
+			},
+		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+		.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+				   OP_ALG_AAI_HMAC_PRECOMP,
+		.alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+	},
+	{
+		.name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
+		.driver_name = "authenc-hmac-sha512-rfc3686-ctr-aes-caam",
+		.blocksize = 1,
+		.type = CRYPTO_ALG_TYPE_AEAD,
+		.template_aead = {
+			.setkey = aead_setkey,
+			.setauthsize = aead_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.givencrypt = aead_givencrypt,
+			.geniv = "<built-in>",
+			.ivsize = CTR_RFC3686_IV_SIZE,
+			.maxauthsize = SHA512_DIGEST_SIZE,
+			},
+		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+		.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+				   OP_ALG_AAI_HMAC_PRECOMP,
+		.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+	},
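+	/* AES-GCM based AEADs: RFC4106 (ESP) and RFC4543 (GMAC) */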
+	{
+		.name = "rfc4106(gcm(aes))",
+		.driver_name = "rfc4106-gcm-aes-caam",
+		.blocksize = 1,
+		.type = CRYPTO_ALG_TYPE_AEAD,
+		.template_aead = {
+			.setkey = rfc4106_setkey,
+			.setauthsize = rfc4106_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.givencrypt = aead_givencrypt,
+			.geniv = "<built-in>",
+			.ivsize = 8,
+			.maxauthsize = AES_BLOCK_SIZE,
+			},
+		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+	},
+	{
+		.name = "rfc4543(gcm(aes))",
+		.driver_name = "rfc4543-gcm-aes-caam",
+		.blocksize = 1,
+		.type = CRYPTO_ALG_TYPE_AEAD,
+		.template_aead = {
+			.setkey = rfc4543_setkey,
+			.setauthsize = rfc4543_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.givencrypt = aead_givencrypt,
+			.geniv = "<built-in>",
+			.ivsize = 8,
+			.maxauthsize = AES_BLOCK_SIZE,
+			},
+		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+	},
+	/* Galois Counter Mode */
+	{
+		.name = "gcm(aes)",
+		.driver_name = "gcm-aes-caam",
+		.blocksize = 1,
+		.type = CRYPTO_ALG_TYPE_AEAD,
+		.template_aead = {
+			.setkey = gcm_setkey,
+			.setauthsize = gcm_setauthsize,
+			.encrypt = aead_encrypt,
+			.decrypt = aead_decrypt,
+			.givencrypt = NULL,
+			.geniv = "<built-in>",
+			.ivsize = 12,
+			.maxauthsize = AES_BLOCK_SIZE,
+			},
+		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+	},
 	/* ablkcipher descriptor */
 	{
 		.name = "cbc(aes)",
 		.driver_name = "cbc-aes-caam",
 		.blocksize = AES_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
 		.template_ablkcipher = {
 			.setkey = ablkcipher_setkey,
 			.encrypt = ablkcipher_encrypt,
 			.decrypt = ablkcipher_decrypt,
-			.geniv = "eseqiv",
+			.givencrypt = ablkcipher_givencrypt,
+			.geniv = "<built-in>",
 			.min_keysize = AES_MIN_KEY_SIZE,
 			.max_keysize = AES_MAX_KEY_SIZE,
 			.ivsize = AES_BLOCK_SIZE,
@@ -2330,12 +4057,13 @@ static struct caam_alg_template driver_algs[] = {
 		.name = "cbc(des3_ede)",
 		.driver_name = "cbc-3des-caam",
 		.blocksize = DES3_EDE_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
 		.template_ablkcipher = {
 			.setkey = ablkcipher_setkey,
 			.encrypt = ablkcipher_encrypt,
 			.decrypt = ablkcipher_decrypt,
-			.geniv = "eseqiv",
+			.givencrypt = ablkcipher_givencrypt,
+			.geniv = "<built-in>",
 			.min_keysize = DES3_EDE_KEY_SIZE,
 			.max_keysize = DES3_EDE_KEY_SIZE,
 			.ivsize = DES3_EDE_BLOCK_SIZE,
@@ -2346,17 +4074,53 @@ static struct caam_alg_template driver_algs[] = {
 		.name = "cbc(des)",
 		.driver_name = "cbc-des-caam",
 		.blocksize = DES_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
 		.template_ablkcipher = {
 			.setkey = ablkcipher_setkey,
 			.encrypt = ablkcipher_encrypt,
 			.decrypt = ablkcipher_decrypt,
-			.geniv = "eseqiv",
+			.givencrypt = ablkcipher_givencrypt,
+			.geniv = "<built-in>",
 			.min_keysize = DES_KEY_SIZE,
 			.max_keysize = DES_KEY_SIZE,
 			.ivsize = DES_BLOCK_SIZE,
 			},
 		.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+	},
+	{
+		.name = "ctr(aes)",
+		.driver_name = "ctr-aes-caam",
+		.blocksize = 1,
+		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.template_ablkcipher = {
+			.setkey = ablkcipher_setkey,
+			.encrypt = ablkcipher_encrypt,
+			.decrypt = ablkcipher_decrypt,
+			.geniv = "chainiv",
+			.min_keysize = AES_MIN_KEY_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE,
+			.ivsize = AES_BLOCK_SIZE,
+			},
+		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+	},
+	{
+		.name = "rfc3686(ctr(aes))",
+		.driver_name = "rfc3686-ctr-aes-caam",
+		.blocksize = 1,
+		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
+		.template_ablkcipher = {
+			.setkey = ablkcipher_setkey,
+			.encrypt = ablkcipher_encrypt,
+			.decrypt = ablkcipher_decrypt,
+			.givencrypt = ablkcipher_givencrypt,
+			.geniv = "<built-in>",
+			.min_keysize = AES_MIN_KEY_SIZE +
+				       CTR_RFC3686_NONCE_SIZE,
+			.max_keysize = AES_MAX_KEY_SIZE +
+				       CTR_RFC3686_NONCE_SIZE,
+			.ivsize = CTR_RFC3686_IV_SIZE,
+			},
+		.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
 	}
 };
 
@@ -2457,6 +4221,10 @@ static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
 	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
 			 template->type;
 	switch (template->type) {
+	case CRYPTO_ALG_TYPE_GIVCIPHER:
+		alg->cra_type = &crypto_givcipher_type;
+		alg->cra_ablkcipher = template->template_ablkcipher;
+		break;
 	case CRYPTO_ALG_TYPE_ABLKCIPHER:
 		alg->cra_type = &crypto_ablkcipher_type;
 		alg->cra_ablkcipher = template->template_ablkcipher;
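
The caamalg.c changes above register "rfc3686(ctr(aes))" and the authenc(hmac(shaN),rfc3686(ctr(aes))) combinations with built-in IV generation. From a caller's point of view only the algorithm name and key layout matter: for RFC 3686 the 4-byte nonce is appended to the AES key, and the per-request IV is CTR_RFC3686_IV_SIZE (8) bytes. A minimal sketch of driving the new transform through the generic ablkcipher API follows; the buffer names are illustrative and the asynchronous completion handling is elided.

	#include <linux/crypto.h>
	#include <linux/scatterlist.h>
	#include <linux/err.h>
	#include <crypto/ctr.h>

	/* Illustrative only: encrypt one 64-byte buffer in place. */
	static int example_rfc3686_encrypt(u8 *buf, const u8 *key_and_nonce,
					   unsigned int keylen, u8 *iv)
	{
		struct crypto_ablkcipher *tfm;
		struct ablkcipher_request *req;
		struct scatterlist sg;
		int ret;

		tfm = crypto_alloc_ablkcipher("rfc3686(ctr(aes))", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		/* keylen = AES key length + CTR_RFC3686_NONCE_SIZE (4) */
		ret = crypto_ablkcipher_setkey(tfm, key_and_nonce, keylen);
		if (ret)
			goto out_free_tfm;

		req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
		if (!req) {
			ret = -ENOMEM;
			goto out_free_tfm;
		}

		sg_init_one(&sg, buf, 64);
		ablkcipher_request_set_callback(req, 0, NULL, NULL);
		/* iv is CTR_RFC3686_IV_SIZE bytes, matching .ivsize above */
		ablkcipher_request_set_crypt(req, &sg, &sg, 64, iv);

		ret = crypto_ablkcipher_encrypt(req);
		/* a real caller must handle -EINPROGRESS/-EBUSY for async tfms */

		ablkcipher_request_free(req);
	out_free_tfm:
		crypto_free_ablkcipher(tfm);
		return ret;
	}
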
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h
index f227922cea38f2dece5c613bad7f0fb2267f3fa7..acd7743e2603cdd0e9f25aed0c4a933dfc423b70 100644
--- a/drivers/crypto/caam/compat.h
+++ b/drivers/crypto/caam/compat.h
@@ -28,6 +28,7 @@
 #include <crypto/algapi.h>
 #include <crypto/null.h>
 #include <crypto/aes.h>
+#include <crypto/ctr.h>
 #include <crypto/des.h>
 #include <crypto/sha.h>
 #include <crypto/md5.h>
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
index 7eec20bb38498f11be36a54ff85d28c8c5ac4b6b..9f79fd7bd4d7d1b7589a2facdc15e7667dbb8d48 100644
--- a/drivers/crypto/caam/desc_constr.h
+++ b/drivers/crypto/caam/desc_constr.h
@@ -192,6 +192,8 @@ static inline void append_##cmd(u32 *desc, unsigned int len, u32 options) \
 	PRINT_POS; \
 	append_cmd(desc, CMD_##op | len | options); \
 }
+
+APPEND_CMD_LEN(seq_load, SEQ_LOAD)
 APPEND_CMD_LEN(seq_store, SEQ_STORE)
 APPEND_CMD_LEN(seq_fifo_load, SEQ_FIFO_LOAD)
 APPEND_CMD_LEN(seq_fifo_store, SEQ_FIFO_STORE)
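
The new APPEND_CMD_LEN(seq_load, SEQ_LOAD) invocation generates an append_seq_load() helper from the macro body shown in this hunk. Expanded by hand it is simply:

	static inline void append_seq_load(u32 *desc, unsigned int len, u32 options)
	{
		PRINT_POS;
		append_cmd(desc, CMD_SEQ_LOAD | len | options);
	}
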
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
index 6531054a44c8b8feb3ec00ff9ad1f139a5a8ebc1..66d73bf54166e2e3beb3a0fe7ad564949bfee831 100644
--- a/drivers/crypto/caam/error.c
+++ b/drivers/crypto/caam/error.c
@@ -213,27 +213,36 @@ void caam_jr_strstatus(struct device *jrdev, u32 status)
 		void (*report_ssed)(struct device *jrdev, const u32 status,
 				    const char *error);
 		const char *error;
-	} status_src[] = {
+	} status_src[16] = {
 		{ NULL, "No error" },
 		{ NULL, NULL },
 		{ report_ccb_status, "CCB" },
 		{ report_jump_status, "Jump" },
 		{ report_deco_status, "DECO" },
-		{ NULL, NULL },
+		{ NULL, "Queue Manager Interface" },
 		{ report_jr_status, "Job Ring" },
 		{ report_cond_code_status, "Condition Code" },
+		{ NULL, NULL },
+		{ NULL, NULL },
+		{ NULL, NULL },
+		{ NULL, NULL },
+		{ NULL, NULL },
+		{ NULL, NULL },
+		{ NULL, NULL },
+		{ NULL, NULL },
 	};
 	u32 ssrc = status >> JRSTA_SSRC_SHIFT;
 	const char *error = status_src[ssrc].error;
 
 	/*
-	 * If there is no further error handling function, just
-	 * print the error code, error string and exit. Otherwise
-	 * call the handler function.
+	 * If there is an error handling function, call it to report the error.
+	 * Otherwise print the error source name.
 	 */
-	if (!status_src[ssrc].report_ssed)
-		dev_err(jrdev, "%08x: %s: \n", status, status_src[ssrc].error);
-	else
+	if (status_src[ssrc].report_ssed)
 		status_src[ssrc].report_ssed(jrdev, status, error);
+	else if (error)
+		dev_err(jrdev, "%d: %s\n", ssrc, error);
+	else
+		dev_err(jrdev, "%d: unknown error source\n", ssrc);
 }
 EXPORT_SYMBOL(caam_jr_strstatus);
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index 4d18e27ffa9e72da4a7763e87d783dec89ba4b66..9207c907a128bd5ff938a2b194ffb42c385aeb9b 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -181,8 +181,6 @@ static void caam_jr_dequeue(unsigned long devarg)
 		for (i = 0; CIRC_CNT(head, tail + i, JOBR_DEPTH) >= 1; i++) {
 			sw_idx = (tail + i) & (JOBR_DEPTH - 1);
 
-			smp_read_barrier_depends();
-
 			if (jrp->outring[hw_idx].desc ==
 			    jrp->entinfo[sw_idx].desc_addr_dma)
 				break; /* found */
@@ -218,7 +216,6 @@ static void caam_jr_dequeue(unsigned long devarg)
 		if (sw_idx == tail) {
 			do {
 				tail = (tail + 1) & (JOBR_DEPTH - 1);
-				smp_read_barrier_depends();
 			} while (CIRC_CNT(head, tail, JOBR_DEPTH) >= 1 &&
 				 jrp->entinfo[tail].desc_addr_dma == 0);
 
diff --git a/drivers/crypto/nx/nx-aes-cbc.c b/drivers/crypto/nx/nx-aes-cbc.c
index cc00b52306ba69f45fcb90829719cbfb8946fdfd..a066cc3450aee83bff092db433ec7e2c03d7be0d 100644
--- a/drivers/crypto/nx/nx-aes-cbc.c
+++ b/drivers/crypto/nx/nx-aes-cbc.c
@@ -72,27 +72,19 @@ static int cbc_aes_nx_crypt(struct blkcipher_desc *desc,
 	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
 	unsigned long irq_flags;
 	unsigned int processed = 0, to_process;
-	u32 max_sg_len;
 	int rc;
 
 	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
 
-	max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
-			   nx_ctx->ap->sglen);
-
 	if (enc)
 		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
 	else
 		NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
 
 	do {
-		to_process = min_t(u64, nbytes - processed,
-				   nx_ctx->ap->databytelen);
-		to_process = min_t(u64, to_process,
-				   NX_PAGE_SIZE * (max_sg_len - 1));
-		to_process = to_process & ~(AES_BLOCK_SIZE - 1);
+		to_process = nbytes - processed;
 
-		rc = nx_build_sg_lists(nx_ctx, desc, dst, src, to_process,
+		rc = nx_build_sg_lists(nx_ctx, desc, dst, src, &to_process,
 				       processed, csbcpb->cpb.aes_cbc.iv);
 		if (rc)
 			goto out;
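
The fixed databytelen/max_sg_len clamping disappears from the CBC loop because nx_build_sg_lists() now clamps internally and returns, through the to_process pointer, how many bytes it actually mapped. The per-iteration pattern the converted call sites follow is sketched below; this is a condensed illustration, and the bookkeeping outside the hunk (IV copy-back, statistics) is omitted.

	do {
		/* ask for everything that is left... */
		to_process = nbytes - processed;

		/* ...and let the sg builder clamp it to the hardware and sg
		 * limits; on return to_process holds the block-aligned amount
		 * that was actually mapped for this chunk */
		rc = nx_build_sg_lists(nx_ctx, desc, dst, src, &to_process,
				       processed, csbcpb->cpb.aes_cbc.iv);
		if (rc)
			goto out;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		processed += to_process;
	} while (processed < nbytes);
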
diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c
index 5ecd4c2414aa85bbc9cbca1b4311a954dfcd3710..67f80813a06f9a780e6573a7dd995f0150c47dcf 100644
--- a/drivers/crypto/nx/nx-aes-ccm.c
+++ b/drivers/crypto/nx/nx-aes-ccm.c
@@ -181,6 +181,7 @@ static int generate_pat(u8                   *iv,
 	unsigned int iauth_len = 0;
 	u8 tmp[16], *b1 = NULL, *b0 = NULL, *result = NULL;
 	int rc;
+	unsigned int max_sg_len;
 
 	/* zero the ctr value */
 	memset(iv + 15 - iv[0], 0, iv[0] + 1);
@@ -248,10 +249,19 @@ static int generate_pat(u8                   *iv,
 	if (!req->assoclen) {
 		return rc;
 	} else if (req->assoclen <= 14) {
-		nx_insg = nx_build_sg_list(nx_insg, b1, 16, nx_ctx->ap->sglen);
-		nx_outsg = nx_build_sg_list(nx_outsg, tmp, 16,
+		unsigned int len = 16;
+
+		nx_insg = nx_build_sg_list(nx_insg, b1, &len, nx_ctx->ap->sglen);
+
+		if (len != 16)
+			return -EINVAL;
+
+		nx_outsg = nx_build_sg_list(nx_outsg, tmp, &len,
 					    nx_ctx->ap->sglen);
 
+		if (len != 16)
+			return -EINVAL;
+
 		/* inlen should be negative, indicating to phyp that its a
 		 * pointer to an sg list */
 		nx_ctx->op.inlen = (nx_ctx->in_sg - nx_insg) *
@@ -273,21 +283,24 @@ static int generate_pat(u8                   *iv,
 		atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));
 
 	} else {
-		u32 max_sg_len;
 		unsigned int processed = 0, to_process;
 
-		/* page_limit: number of sg entries that fit on one page */
-		max_sg_len = min_t(u32,
-				   nx_driver.of.max_sg_len/sizeof(struct nx_sg),
-				   nx_ctx->ap->sglen);
-
 		processed += iauth_len;
 
+		/* page_limit: number of sg entries that fit on one page */
+		max_sg_len = min_t(u64, nx_ctx->ap->sglen,
+				nx_driver.of.max_sg_len/sizeof(struct nx_sg));
+		max_sg_len = min_t(u64, max_sg_len,
+				nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+
 		do {
 			to_process = min_t(u32, req->assoclen - processed,
 					   nx_ctx->ap->databytelen);
-			to_process = min_t(u64, to_process,
-					   NX_PAGE_SIZE * (max_sg_len - 1));
+
+			nx_insg = nx_walk_and_build(nx_ctx->in_sg,
+						    nx_ctx->ap->sglen,
+						    req->assoc, processed,
+						    &to_process);
 
 			if ((to_process + processed) < req->assoclen) {
 				NX_CPB_FDM(nx_ctx->csbcpb_aead) |=
@@ -297,10 +310,6 @@ static int generate_pat(u8                   *iv,
 					~NX_FDM_INTERMEDIATE;
 			}
 
-			nx_insg = nx_walk_and_build(nx_ctx->in_sg,
-						    nx_ctx->ap->sglen,
-						    req->assoc, processed,
-						    to_process);
 
 			nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_insg) *
 						sizeof(struct nx_sg);
@@ -343,7 +352,6 @@ static int ccm_nx_decrypt(struct aead_request   *req,
 	struct nx_ccm_priv *priv = &nx_ctx->priv.ccm;
 	unsigned long irq_flags;
 	unsigned int processed = 0, to_process;
-	u32 max_sg_len;
 	int rc = -1;
 
 	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
@@ -360,19 +368,12 @@ static int ccm_nx_decrypt(struct aead_request   *req,
 	if (rc)
 		goto out;
 
-	/* page_limit: number of sg entries that fit on one page */
-	max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
-			   nx_ctx->ap->sglen);
-
 	do {
 
 		/* to_process: the AES_BLOCK_SIZE data chunk to process in this
 		 * update. This value is bound by sg list limits.
 		 */
-		to_process = min_t(u64, nbytes - processed,
-				   nx_ctx->ap->databytelen);
-		to_process = min_t(u64, to_process,
-				   NX_PAGE_SIZE * (max_sg_len - 1));
+		to_process = nbytes - processed;
 
 		if ((to_process + processed) < nbytes)
 			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
@@ -382,7 +383,7 @@ static int ccm_nx_decrypt(struct aead_request   *req,
 		NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
 
 		rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
-					to_process, processed,
+					&to_process, processed,
 					csbcpb->cpb.aes_ccm.iv_or_ctr);
 		if (rc)
 			goto out;
@@ -427,7 +428,6 @@ static int ccm_nx_encrypt(struct aead_request   *req,
 	unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
 	unsigned long irq_flags;
 	unsigned int processed = 0, to_process;
-	u32 max_sg_len;
 	int rc = -1;
 
 	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
@@ -437,18 +437,11 @@ static int ccm_nx_encrypt(struct aead_request   *req,
 	if (rc)
 		goto out;
 
-	/* page_limit: number of sg entries that fit on one page */
-	max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
-			   nx_ctx->ap->sglen);
-
 	do {
 		/* to process: the AES_BLOCK_SIZE data chunk to process in this
 		 * update. This value is bound by sg list limits.
 		 */
-		to_process = min_t(u64, nbytes - processed,
-				   nx_ctx->ap->databytelen);
-		to_process = min_t(u64, to_process,
-				   NX_PAGE_SIZE * (max_sg_len - 1));
+		to_process = nbytes - processed;
 
 		if ((to_process + processed) < nbytes)
 			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
@@ -458,7 +451,7 @@ static int ccm_nx_encrypt(struct aead_request   *req,
 		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
 
 		rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
-					to_process, processed,
+					&to_process, processed,
 				       csbcpb->cpb.aes_ccm.iv_or_ctr);
 		if (rc)
 			goto out;
diff --git a/drivers/crypto/nx/nx-aes-ctr.c b/drivers/crypto/nx/nx-aes-ctr.c
index a37d009dc75c76efef57cf7f2e4437c113fcae7e..2617cd4d54dd2e458e8a56de967c468a66ef2f09 100644
--- a/drivers/crypto/nx/nx-aes-ctr.c
+++ b/drivers/crypto/nx/nx-aes-ctr.c
@@ -90,22 +90,14 @@ static int ctr_aes_nx_crypt(struct blkcipher_desc *desc,
 	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
 	unsigned long irq_flags;
 	unsigned int processed = 0, to_process;
-	u32 max_sg_len;
 	int rc;
 
 	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
 
-	max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
-			   nx_ctx->ap->sglen);
-
 	do {
-		to_process = min_t(u64, nbytes - processed,
-				   nx_ctx->ap->databytelen);
-		to_process = min_t(u64, to_process,
-				   NX_PAGE_SIZE * (max_sg_len - 1));
-		to_process = to_process & ~(AES_BLOCK_SIZE - 1);
+		to_process = nbytes - processed;
 
-		rc = nx_build_sg_lists(nx_ctx, desc, dst, src, to_process,
+		rc = nx_build_sg_lists(nx_ctx, desc, dst, src, &to_process,
 				       processed, csbcpb->cpb.aes_ctr.iv);
 		if (rc)
 			goto out;
@@ -143,6 +135,7 @@ static int ctr3686_aes_nx_crypt(struct blkcipher_desc *desc,
 
 	memcpy(iv + CTR_RFC3686_NONCE_SIZE,
 	       desc->info, CTR_RFC3686_IV_SIZE);
+	iv[12] = iv[13] = iv[14] = 0;
 	iv[15] = 1;
 
 	desc->info = nx_ctx->priv.ctr.iv;
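
The three zero bytes and the trailing 1 written here complete the initial RFC 3686 counter block by hand. The 16-byte block handed to the NX engine is laid out as shown in the sketch below (sizes per the CTR_RFC3686_* constants; the nonce and request IV names are illustrative):

	/*
	 * RFC 3686 counter block (16 bytes), as assembled above:
	 *
	 *   iv[0..3]   - per-key nonce (CTR_RFC3686_NONCE_SIZE = 4), from setkey
	 *   iv[4..11]  - per-request IV (CTR_RFC3686_IV_SIZE = 8), from desc->info
	 *   iv[12..15] - 32-bit big-endian block counter, starting at 1
	 */
	memcpy(iv, nonce, CTR_RFC3686_NONCE_SIZE);
	memcpy(iv + CTR_RFC3686_NONCE_SIZE, request_iv, CTR_RFC3686_IV_SIZE);
	iv[12] = iv[13] = iv[14] = 0;	/* clear the counter... */
	iv[15] = 1;			/* ...and start it at 1, per RFC 3686 */
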
diff --git a/drivers/crypto/nx/nx-aes-ecb.c b/drivers/crypto/nx/nx-aes-ecb.c
index 85a8d23cf29d72b14180c6f7e7777786010c2094..cfdde8b8bc76dc84a199f7fcfd2143f501e77f64 100644
--- a/drivers/crypto/nx/nx-aes-ecb.c
+++ b/drivers/crypto/nx/nx-aes-ecb.c
@@ -72,27 +72,19 @@ static int ecb_aes_nx_crypt(struct blkcipher_desc *desc,
 	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
 	unsigned long irq_flags;
 	unsigned int processed = 0, to_process;
-	u32 max_sg_len;
 	int rc;
 
 	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
 
-	max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
-			   nx_ctx->ap->sglen);
-
 	if (enc)
 		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
 	else
 		NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
 
 	do {
-		to_process = min_t(u64, nbytes - processed,
-				   nx_ctx->ap->databytelen);
-		to_process = min_t(u64, to_process,
-				   NX_PAGE_SIZE * (max_sg_len - 1));
-		to_process = to_process & ~(AES_BLOCK_SIZE - 1);
+		to_process = nbytes - processed;
 
-		rc = nx_build_sg_lists(nx_ctx, desc, dst, src, to_process,
+		rc = nx_build_sg_lists(nx_ctx, desc, dst, src, &to_process,
 				processed, NULL);
 		if (rc)
 			goto out;
diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c
index 025d9a8d5b1908126804c8468d30ec6902cb4c59..88c562434bc0b737b2a6d99856e6dda284424634 100644
--- a/drivers/crypto/nx/nx-aes-gcm.c
+++ b/drivers/crypto/nx/nx-aes-gcm.c
@@ -131,7 +131,7 @@ static int nx_gca(struct nx_crypto_ctx  *nx_ctx,
 	struct nx_sg *nx_sg = nx_ctx->in_sg;
 	unsigned int nbytes = req->assoclen;
 	unsigned int processed = 0, to_process;
-	u32 max_sg_len;
+	unsigned int max_sg_len;
 
 	if (nbytes <= AES_BLOCK_SIZE) {
 		scatterwalk_start(&walk, req->assoc);
@@ -143,8 +143,10 @@ static int nx_gca(struct nx_crypto_ctx  *nx_ctx,
 	NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_CONTINUATION;
 
 	/* page_limit: number of sg entries that fit on one page */
-	max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
+	max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
 			   nx_ctx->ap->sglen);
+	max_sg_len = min_t(u64, max_sg_len,
+			   nx_ctx->ap->databytelen/NX_PAGE_SIZE);
 
 	do {
 		/*
@@ -156,13 +158,14 @@ static int nx_gca(struct nx_crypto_ctx  *nx_ctx,
 		to_process = min_t(u64, to_process,
 				   NX_PAGE_SIZE * (max_sg_len - 1));
 
+		nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len,
+					  req->assoc, processed, &to_process);
+
 		if ((to_process + processed) < nbytes)
 			NX_CPB_FDM(csbcpb_aead) |= NX_FDM_INTERMEDIATE;
 		else
 			NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_INTERMEDIATE;
 
-		nx_sg = nx_walk_and_build(nx_ctx->in_sg, nx_ctx->ap->sglen,
-					  req->assoc, processed, to_process);
 		nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_sg)
 					* sizeof(struct nx_sg);
 
@@ -195,7 +198,7 @@ static int gmac(struct aead_request *req, struct blkcipher_desc *desc)
 	struct nx_sg *nx_sg;
 	unsigned int nbytes = req->assoclen;
 	unsigned int processed = 0, to_process;
-	u32 max_sg_len;
+	unsigned int max_sg_len;
 
 	/* Set GMAC mode */
 	csbcpb->cpb.hdr.mode = NX_MODE_AES_GMAC;
@@ -203,8 +206,10 @@ static int gmac(struct aead_request *req, struct blkcipher_desc *desc)
 	NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
 
 	/* page_limit: number of sg entries that fit on one page */
-	max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
+	max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
 			   nx_ctx->ap->sglen);
+	max_sg_len = min_t(u64, max_sg_len,
+			   nx_ctx->ap->databytelen/NX_PAGE_SIZE);
 
 	/* Copy IV */
 	memcpy(csbcpb->cpb.aes_gcm.iv_or_cnt, desc->info, AES_BLOCK_SIZE);
@@ -219,13 +224,14 @@ static int gmac(struct aead_request *req, struct blkcipher_desc *desc)
 		to_process = min_t(u64, to_process,
 				   NX_PAGE_SIZE * (max_sg_len - 1));
 
+		nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len,
+					  req->assoc, processed, &to_process);
+
 		if ((to_process + processed) < nbytes)
 			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
 		else
 			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
 
-		nx_sg = nx_walk_and_build(nx_ctx->in_sg, nx_ctx->ap->sglen,
-					  req->assoc, processed, to_process);
 		nx_ctx->op.inlen = (nx_ctx->in_sg - nx_sg)
 					* sizeof(struct nx_sg);
 
@@ -264,6 +270,7 @@ static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc,
 	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
 	char out[AES_BLOCK_SIZE];
 	struct nx_sg *in_sg, *out_sg;
+	int len;
 
 	/* For scenarios where the input message is zero length, AES CTR mode
 	 * may be used. Set the source data to be a single block (16B) of all
@@ -279,11 +286,22 @@ static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc,
 	else
 		NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
 
+	len = AES_BLOCK_SIZE;
+
 	/* Encrypt the counter/IV */
 	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) desc->info,
-				 AES_BLOCK_SIZE, nx_ctx->ap->sglen);
-	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) out, sizeof(out),
+				 &len, nx_ctx->ap->sglen);
+
+	if (len != AES_BLOCK_SIZE)
+		return -EINVAL;
+
+	len = sizeof(out);
+	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) out, &len,
 				  nx_ctx->ap->sglen);
+
+	if (len != sizeof(out))
+		return -EINVAL;
+
 	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
 	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
 
@@ -317,7 +335,6 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
 	unsigned int nbytes = req->cryptlen;
 	unsigned int processed = 0, to_process;
 	unsigned long irq_flags;
-	u32 max_sg_len;
 	int rc = -EINVAL;
 
 	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
@@ -354,33 +371,24 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
 		nbytes -= crypto_aead_authsize(crypto_aead_reqtfm(req));
 	}
 
-	/* page_limit: number of sg entries that fit on one page */
-	max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
-			   nx_ctx->ap->sglen);
-
 	do {
-		/*
-		 * to_process: the data chunk to process in this update.
-		 * This value is bound by sg list limits.
-		 */
-		to_process = min_t(u64, nbytes - processed,
-				   nx_ctx->ap->databytelen);
-		to_process = min_t(u64, to_process,
-				   NX_PAGE_SIZE * (max_sg_len - 1));
-
-		if ((to_process + processed) < nbytes)
-			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
-		else
-			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+		to_process = nbytes - processed;
 
 		csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;
 		desc.tfm = (struct crypto_blkcipher *) req->base.tfm;
 		rc = nx_build_sg_lists(nx_ctx, &desc, req->dst,
-				       req->src, to_process, processed,
+				       req->src, &to_process, processed,
 				       csbcpb->cpb.aes_gcm.iv_or_cnt);
+
 		if (rc)
 			goto out;
 
+		if ((to_process + processed) < nbytes)
+			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+		else
+			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+
+
 		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
 				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
 		if (rc)
diff --git a/drivers/crypto/nx/nx-aes-xcbc.c b/drivers/crypto/nx/nx-aes-xcbc.c
index 03c4bf57d066b230d8750875eb8bdc33429cd357..8c2faffab4a35952c7b28bccf04c7ea5f706a471 100644
--- a/drivers/crypto/nx/nx-aes-xcbc.c
+++ b/drivers/crypto/nx/nx-aes-xcbc.c
@@ -75,6 +75,7 @@ static int nx_xcbc_empty(struct shash_desc *desc, u8 *out)
 	u8 keys[2][AES_BLOCK_SIZE];
 	u8 key[32];
 	int rc = 0;
+	int len;
 
 	/* Change to ECB mode */
 	csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB;
@@ -86,11 +87,20 @@ static int nx_xcbc_empty(struct shash_desc *desc, u8 *out)
 	memset(keys[0], 0x01, sizeof(keys[0]));
 	memset(keys[1], 0x03, sizeof(keys[1]));
 
+	len = sizeof(keys);
 	/* Generate K1 and K3 encrypting the patterns */
-	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) keys, sizeof(keys),
+	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) keys, &len,
 				 nx_ctx->ap->sglen);
-	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) keys, sizeof(keys),
+
+	if (len != sizeof(keys))
+		return -EINVAL;
+
+	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) keys, &len,
 				  nx_ctx->ap->sglen);
+
+	if (len != sizeof(keys))
+		return -EINVAL;
+
 	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
 	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
 
@@ -103,12 +113,23 @@ static int nx_xcbc_empty(struct shash_desc *desc, u8 *out)
 	/* XOr K3 with the padding for a 0 length message */
 	keys[1][0] ^= 0x80;
 
+	len = sizeof(keys[1]);
+
 	/* Encrypt the final result */
 	memcpy(csbcpb->cpb.aes_ecb.key, keys[0], AES_BLOCK_SIZE);
-	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) keys[1], sizeof(keys[1]),
+	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) keys[1], &len,
 				 nx_ctx->ap->sglen);
-	out_sg = nx_build_sg_list(nx_ctx->out_sg, out, AES_BLOCK_SIZE,
+
+	if (len != sizeof(keys[1]))
+		return -EINVAL;
+
+	len = AES_BLOCK_SIZE;
+	out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len,
 				  nx_ctx->ap->sglen);
+
+	if (len != AES_BLOCK_SIZE)
+		return -EINVAL;
+
 	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
 	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
 
@@ -133,6 +154,7 @@ static int nx_xcbc_init(struct shash_desc *desc)
 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
 	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
 	struct nx_sg *out_sg;
+	int len;
 
 	nx_ctx_init(nx_ctx, HCOP_FC_AES);
 
@@ -144,8 +166,13 @@ static int nx_xcbc_init(struct shash_desc *desc)
 	memcpy(csbcpb->cpb.aes_xcbc.key, nx_ctx->priv.xcbc.key, AES_BLOCK_SIZE);
 	memset(nx_ctx->priv.xcbc.key, 0, sizeof *nx_ctx->priv.xcbc.key);
 
+	len = AES_BLOCK_SIZE;
 	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
-				  AES_BLOCK_SIZE, nx_ctx->ap->sglen);
+				  &len, nx_ctx->ap->sglen);
+
+	if (len != AES_BLOCK_SIZE)
+		return -EINVAL;
+
 	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
 
 	return 0;
@@ -159,10 +186,11 @@ static int nx_xcbc_update(struct shash_desc *desc,
 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
 	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
 	struct nx_sg *in_sg;
-	u32 to_process, leftover, total;
-	u32 max_sg_len;
+	u32 to_process = 0, leftover, total;
+	unsigned int max_sg_len;
 	unsigned long irq_flags;
 	int rc = 0;
+	int data_len;
 
 	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
 
@@ -180,17 +208,15 @@ static int nx_xcbc_update(struct shash_desc *desc,
 	}
 
 	in_sg = nx_ctx->in_sg;
-	max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
+	max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
 				nx_ctx->ap->sglen);
+	max_sg_len = min_t(u64, max_sg_len,
+				nx_ctx->ap->databytelen/NX_PAGE_SIZE);
 
 	do {
-
-		/* to_process: the AES_BLOCK_SIZE data chunk to process in this
-		 * update */
-		to_process = min_t(u64, total, nx_ctx->ap->databytelen);
-		to_process = min_t(u64, to_process,
-					NX_PAGE_SIZE * (max_sg_len - 1));
+		to_process = total - to_process;
 		to_process = to_process & ~(AES_BLOCK_SIZE - 1);
+
 		leftover = total - to_process;
 
 		/* the hardware will not accept a 0 byte operation for this
@@ -204,15 +230,24 @@ static int nx_xcbc_update(struct shash_desc *desc,
 		}
 
 		if (sctx->count) {
+			data_len = sctx->count;
 			in_sg = nx_build_sg_list(nx_ctx->in_sg,
 						(u8 *) sctx->buffer,
-						sctx->count,
+						&data_len,
 						max_sg_len);
+			if (data_len != sctx->count)
+				return -EINVAL;
 		}
+
+		data_len = to_process - sctx->count;
 		in_sg = nx_build_sg_list(in_sg,
 					(u8 *) data,
-					to_process - sctx->count,
+					&data_len,
 					max_sg_len);
+
+		if (data_len != to_process - sctx->count)
+			return -EINVAL;
+
 		nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
 					sizeof(struct nx_sg);
 
@@ -263,6 +298,7 @@ static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
 	struct nx_sg *in_sg, *out_sg;
 	unsigned long irq_flags;
 	int rc = 0;
+	int len;
 
 	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
 
@@ -285,11 +321,20 @@ static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
 	 * this is not an intermediate operation */
 	NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
 
+	len = sctx->count;
 	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buffer,
-				 sctx->count, nx_ctx->ap->sglen);
-	out_sg = nx_build_sg_list(nx_ctx->out_sg, out, AES_BLOCK_SIZE,
+				 &len, nx_ctx->ap->sglen);
+
+	if (len != sctx->count)
+		return -EINVAL;
+
+	len = AES_BLOCK_SIZE;
+	out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len,
 				  nx_ctx->ap->sglen);
 
+	if (len != AES_BLOCK_SIZE)
+		return -EINVAL;
+
 	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
 	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
 
diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c
index da0b24a7633f0c55626d6c9ecefe8211c7ef7817..23621da624c35b3192d31f1bd897b10d32fc2a20 100644
--- a/drivers/crypto/nx/nx-sha256.c
+++ b/drivers/crypto/nx/nx-sha256.c
@@ -23,6 +23,7 @@
 #include <crypto/sha.h>
 #include <linux/module.h>
 #include <asm/vio.h>
+#include <asm/byteorder.h>
 
 #include "nx_csbcpb.h"
 #include "nx.h"
@@ -32,7 +33,8 @@ static int nx_sha256_init(struct shash_desc *desc)
 {
 	struct sha256_state *sctx = shash_desc_ctx(desc);
 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
-	struct nx_sg *out_sg;
+	int len;
+	int rc;
 
 	nx_ctx_init(nx_ctx, HCOP_FC_SHA);
 
@@ -41,10 +43,28 @@ static int nx_sha256_init(struct shash_desc *desc)
 	nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA256];
 
 	NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA256);
-	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
-				  SHA256_DIGEST_SIZE, nx_ctx->ap->sglen);
-	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
 
+	len = SHA256_DIGEST_SIZE;
+	rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg,
+				  &nx_ctx->op.outlen,
+				  &len,
+				  (u8 *) sctx->state,
+				  NX_DS_SHA256);
+
+	if (rc)
+		goto out;
+
+	sctx->state[0] = __cpu_to_be32(SHA256_H0);
+	sctx->state[1] = __cpu_to_be32(SHA256_H1);
+	sctx->state[2] = __cpu_to_be32(SHA256_H2);
+	sctx->state[3] = __cpu_to_be32(SHA256_H3);
+	sctx->state[4] = __cpu_to_be32(SHA256_H4);
+	sctx->state[5] = __cpu_to_be32(SHA256_H5);
+	sctx->state[6] = __cpu_to_be32(SHA256_H6);
+	sctx->state[7] = __cpu_to_be32(SHA256_H7);
+	sctx->count = 0;
+
+out:
 	return 0;
 }
 
@@ -54,11 +74,11 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
 	struct sha256_state *sctx = shash_desc_ctx(desc);
 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
 	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
-	struct nx_sg *in_sg;
-	u64 to_process, leftover, total;
-	u32 max_sg_len;
+	u64 to_process = 0, leftover, total;
 	unsigned long irq_flags;
 	int rc = 0;
+	int data_len;
+	u64 buf_len = (sctx->count % SHA256_BLOCK_SIZE);
 
 	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
 
@@ -66,16 +86,16 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
 	 *  1: < SHA256_BLOCK_SIZE: copy into state, return 0
 	 *  2: >= SHA256_BLOCK_SIZE: process X blocks, copy in leftover
 	 */
-	total = sctx->count + len;
+	total = (sctx->count % SHA256_BLOCK_SIZE) + len;
 	if (total < SHA256_BLOCK_SIZE) {
-		memcpy(sctx->buf + sctx->count, data, len);
+		memcpy(sctx->buf + buf_len, data, len);
 		sctx->count += len;
 		goto out;
 	}
 
-	in_sg = nx_ctx->in_sg;
-	max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
-			   nx_ctx->ap->sglen);
+	memcpy(csbcpb->cpb.sha256.message_digest, sctx->state, SHA256_DIGEST_SIZE);
+	NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+	NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
 
 	do {
 		/*
@@ -83,34 +103,42 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
 		 * this update. This value is also restricted by the sg list
 		 * limits.
 		 */
-		to_process = min_t(u64, total, nx_ctx->ap->databytelen);
-		to_process = min_t(u64, to_process,
-				   NX_PAGE_SIZE * (max_sg_len - 1));
+		to_process = total - to_process;
 		to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);
-		leftover = total - to_process;
 
-		if (sctx->count) {
-			in_sg = nx_build_sg_list(nx_ctx->in_sg,
-						 (u8 *) sctx->buf,
-						 sctx->count, max_sg_len);
+		if (buf_len) {
+			data_len = buf_len;
+			rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
+						  &nx_ctx->op.inlen,
+						  &data_len,
+						  (u8 *) sctx->buf,
+						  NX_DS_SHA256);
+
+			if (rc || data_len != buf_len)
+				goto out;
 		}
-		in_sg = nx_build_sg_list(in_sg, (u8 *) data,
-					 to_process - sctx->count,
-					 max_sg_len);
-		nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
-					sizeof(struct nx_sg);
-
-		if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
-			/*
-			 * we've hit the nx chip previously and we're updating
-			 * again, so copy over the partial digest.
-			 */
-			memcpy(csbcpb->cpb.sha256.input_partial_digest,
+
+		data_len = to_process - buf_len;
+		rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
+					  &nx_ctx->op.inlen,
+					  &data_len,
+					  (u8 *) data,
+					  NX_DS_SHA256);
+
+		if (rc)
+			goto out;
+
+		to_process = (data_len + buf_len);
+		leftover = total - to_process;
+
+		/*
+		 * we've hit the nx chip previously and we're updating
+		 * again, so copy over the partial digest.
+		 */
+		memcpy(csbcpb->cpb.sha256.input_partial_digest,
 			       csbcpb->cpb.sha256.message_digest,
 			       SHA256_DIGEST_SIZE);
-		}
 
-		NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
 		if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
 			rc = -EINVAL;
 			goto out;
@@ -122,22 +150,19 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
 			goto out;
 
 		atomic_inc(&(nx_ctx->stats->sha256_ops));
-		csbcpb->cpb.sha256.message_bit_length += (u64)
-			(csbcpb->cpb.sha256.spbc * 8);
-
-		/* everything after the first update is continuation */
-		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
 
 		total -= to_process;
-		data += to_process - sctx->count;
-		sctx->count = 0;
-		in_sg = nx_ctx->in_sg;
+		data += to_process - buf_len;
+		buf_len = 0;
+
 	} while (leftover >= SHA256_BLOCK_SIZE);
 
 	/* copy the leftover back into the state struct */
 	if (leftover)
 		memcpy(sctx->buf, data, leftover);
-	sctx->count = leftover;
+
+	sctx->count += len;
+	memcpy(sctx->state, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
 out:
 	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
 	return rc;
@@ -148,34 +173,46 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out)
 	struct sha256_state *sctx = shash_desc_ctx(desc);
 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
 	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
-	struct nx_sg *in_sg, *out_sg;
-	u32 max_sg_len;
 	unsigned long irq_flags;
 	int rc;
+	int len;
 
 	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
 
-	max_sg_len = min_t(u32, nx_driver.of.max_sg_len, nx_ctx->ap->sglen);
-
-	if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
+	/* final is represented by continuing the operation and indicating that
+	 * this is not an intermediate operation */
+	if (sctx->count >= SHA256_BLOCK_SIZE) {
 		/* we've hit the nx chip previously, now we're finalizing,
 		 * so copy over the partial digest */
-		memcpy(csbcpb->cpb.sha256.input_partial_digest,
-		       csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
+		memcpy(csbcpb->cpb.sha256.input_partial_digest, sctx->state, SHA256_DIGEST_SIZE);
+		NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
+	} else {
+		NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+		NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
 	}
 
-	/* final is represented by continuing the operation and indicating that
-	 * this is not an intermediate operation */
-	NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+	csbcpb->cpb.sha256.message_bit_length = (u64) (sctx->count * 8);
 
-	csbcpb->cpb.sha256.message_bit_length += (u64)(sctx->count * 8);
+	len = sctx->count & (SHA256_BLOCK_SIZE - 1);
+	rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
+				  &nx_ctx->op.inlen,
+				  &len,
+				  (u8 *) sctx->buf,
+				  NX_DS_SHA256);
 
-	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buf,
-				 sctx->count, max_sg_len);
-	out_sg = nx_build_sg_list(nx_ctx->out_sg, out, SHA256_DIGEST_SIZE,
-				  max_sg_len);
-	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
-	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+	if (rc || len != (sctx->count & (SHA256_BLOCK_SIZE - 1)))
+		goto out;
+
+	len = SHA256_DIGEST_SIZE;
+	rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg,
+				  &nx_ctx->op.outlen,
+				  &len,
+				  out,
+				  NX_DS_SHA256);
+
+	if (rc || len != SHA256_DIGEST_SIZE)
+		goto out;
 
 	if (!nx_ctx->op.outlen) {
 		rc = -EINVAL;
@@ -189,8 +226,7 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out)
 
 	atomic_inc(&(nx_ctx->stats->sha256_ops));
 
-	atomic64_add(csbcpb->cpb.sha256.message_bit_length / 8,
-		     &(nx_ctx->stats->sha256_bytes));
+	atomic64_add(sctx->count, &(nx_ctx->stats->sha256_bytes));
 	memcpy(out, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
 out:
 	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
@@ -200,62 +236,18 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out)
 static int nx_sha256_export(struct shash_desc *desc, void *out)
 {
 	struct sha256_state *sctx = shash_desc_ctx(desc);
-	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
-	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
-	struct sha256_state *octx = out;
-	unsigned long irq_flags;
-
-	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
 
-	octx->count = sctx->count +
-		      (csbcpb->cpb.sha256.message_bit_length / 8);
-	memcpy(octx->buf, sctx->buf, sizeof(octx->buf));
-
-	/* if no data has been processed yet, we need to export SHA256's
-	 * initial data, in case this context gets imported into a software
-	 * context */
-	if (csbcpb->cpb.sha256.message_bit_length)
-		memcpy(octx->state, csbcpb->cpb.sha256.message_digest,
-		       SHA256_DIGEST_SIZE);
-	else {
-		octx->state[0] = SHA256_H0;
-		octx->state[1] = SHA256_H1;
-		octx->state[2] = SHA256_H2;
-		octx->state[3] = SHA256_H3;
-		octx->state[4] = SHA256_H4;
-		octx->state[5] = SHA256_H5;
-		octx->state[6] = SHA256_H6;
-		octx->state[7] = SHA256_H7;
-	}
+	memcpy(out, sctx, sizeof(*sctx));
 
-	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
 	return 0;
 }
 
 static int nx_sha256_import(struct shash_desc *desc, const void *in)
 {
 	struct sha256_state *sctx = shash_desc_ctx(desc);
-	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
-	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
-	const struct sha256_state *ictx = in;
-	unsigned long irq_flags;
-
-	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
 
-	memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
+	memcpy(sctx, in, sizeof(*sctx));
 
-	sctx->count = ictx->count & 0x3f;
-	csbcpb->cpb.sha256.message_bit_length = (ictx->count & ~0x3f) * 8;
-
-	if (csbcpb->cpb.sha256.message_bit_length) {
-		memcpy(csbcpb->cpb.sha256.message_digest, ictx->state,
-		       SHA256_DIGEST_SIZE);
-
-		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
-		NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
-	}
-
-	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
 	return 0;
 }
 
diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c
index 4ae5b0f221d5306fa96c47b423cf143b0454504e..b3adf10226733e57fea0871220f9e0ae294f3299 100644
--- a/drivers/crypto/nx/nx-sha512.c
+++ b/drivers/crypto/nx/nx-sha512.c
@@ -32,7 +32,8 @@ static int nx_sha512_init(struct shash_desc *desc)
 {
 	struct sha512_state *sctx = shash_desc_ctx(desc);
 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
-	struct nx_sg *out_sg;
+	int len;
+	int rc;
 
 	nx_ctx_init(nx_ctx, HCOP_FC_SHA);
 
@@ -41,10 +42,28 @@ static int nx_sha512_init(struct shash_desc *desc)
 	nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA512];
 
 	NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA512);
-	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
-				  SHA512_DIGEST_SIZE, nx_ctx->ap->sglen);
-	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
 
+	len = SHA512_DIGEST_SIZE;
+	rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg,
+				  &nx_ctx->op.outlen,
+				  &len,
+				  (u8 *)sctx->state,
+				  NX_DS_SHA512);
+
+	if (rc || len != SHA512_DIGEST_SIZE)
+		goto out;
+
+	sctx->state[0] = __cpu_to_be64(SHA512_H0);
+	sctx->state[1] = __cpu_to_be64(SHA512_H1);
+	sctx->state[2] = __cpu_to_be64(SHA512_H2);
+	sctx->state[3] = __cpu_to_be64(SHA512_H3);
+	sctx->state[4] = __cpu_to_be64(SHA512_H4);
+	sctx->state[5] = __cpu_to_be64(SHA512_H5);
+	sctx->state[6] = __cpu_to_be64(SHA512_H6);
+	sctx->state[7] = __cpu_to_be64(SHA512_H7);
+	sctx->count[0] = 0;
+
+out:
 	return 0;
 }
 
@@ -54,11 +73,11 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
 	struct sha512_state *sctx = shash_desc_ctx(desc);
 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
 	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
-	struct nx_sg *in_sg;
-	u64 to_process, leftover, total, spbc_bits;
-	u32 max_sg_len;
+	u64 to_process, leftover = 0, total;
 	unsigned long irq_flags;
 	int rc = 0;
+	int data_len;
+	u64 buf_len = (sctx->count[0] % SHA512_BLOCK_SIZE);
 
 	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
 
@@ -66,16 +85,16 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
 	 *  1: < SHA512_BLOCK_SIZE: copy into state, return 0
 	 *  2: >= SHA512_BLOCK_SIZE: process X blocks, copy in leftover
 	 */
-	total = sctx->count[0] + len;
+	total = (sctx->count[0] % SHA512_BLOCK_SIZE) + len;
 	if (total < SHA512_BLOCK_SIZE) {
-		memcpy(sctx->buf + sctx->count[0], data, len);
+		memcpy(sctx->buf + buf_len, data, len);
 		sctx->count[0] += len;
 		goto out;
 	}
 
-	in_sg = nx_ctx->in_sg;
-	max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
-			   nx_ctx->ap->sglen);
+	memcpy(csbcpb->cpb.sha512.message_digest, sctx->state, SHA512_DIGEST_SIZE);
+	NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+	NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
 
 	do {
 		/*
@@ -83,34 +102,43 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
 		 * this update. This value is also restricted by the sg list
 		 * limits.
 		 */
-		to_process = min_t(u64, total, nx_ctx->ap->databytelen);
-		to_process = min_t(u64, to_process,
-				   NX_PAGE_SIZE * (max_sg_len - 1));
+		to_process = total - leftover;
 		to_process = to_process & ~(SHA512_BLOCK_SIZE - 1);
 		leftover = total - to_process;
 
-		if (sctx->count[0]) {
-			in_sg = nx_build_sg_list(nx_ctx->in_sg,
-						 (u8 *) sctx->buf,
-						 sctx->count[0], max_sg_len);
+		if (buf_len) {
+			data_len = buf_len;
+			rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
+						  &nx_ctx->op.inlen,
+						  &data_len,
+						  (u8 *) sctx->buf,
+						  NX_DS_SHA512);
+
+			if (rc || data_len != buf_len)
+				goto out;
 		}
-		in_sg = nx_build_sg_list(in_sg, (u8 *) data,
-					 to_process - sctx->count[0],
-					 max_sg_len);
-		nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
-					sizeof(struct nx_sg);
-
-		if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
-			/*
-			 * we've hit the nx chip previously and we're updating
-			 * again, so copy over the partial digest.
-			 */
-			memcpy(csbcpb->cpb.sha512.input_partial_digest,
+
+		data_len = to_process - buf_len;
+		rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
+					  &nx_ctx->op.inlen,
+					  &data_len,
+					  (u8 *) data,
+					  NX_DS_SHA512);
+
+		if (rc || data_len != (to_process - buf_len))
+			goto out;
+
+		to_process = (data_len + buf_len);
+		leftover = total - to_process;
+
+		/*
+		 * we've hit the nx chip previously and we're updating
+		 * again, so copy over the partial digest.
+		 */
+		memcpy(csbcpb->cpb.sha512.input_partial_digest,
 			       csbcpb->cpb.sha512.message_digest,
 			       SHA512_DIGEST_SIZE);
-		}
 
-		NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
 		if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
 			rc = -EINVAL;
 			goto out;
@@ -122,24 +150,18 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
 			goto out;
 
 		atomic_inc(&(nx_ctx->stats->sha512_ops));
-		spbc_bits = csbcpb->cpb.sha512.spbc * 8;
-		csbcpb->cpb.sha512.message_bit_length_lo += spbc_bits;
-		if (csbcpb->cpb.sha512.message_bit_length_lo < spbc_bits)
-			csbcpb->cpb.sha512.message_bit_length_hi++;
-
-		/* everything after the first update is continuation */
-		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
 
 		total -= to_process;
-		data += to_process - sctx->count[0];
-		sctx->count[0] = 0;
-		in_sg = nx_ctx->in_sg;
+		data += to_process - buf_len;
+		buf_len = 0;
+
 	} while (leftover >= SHA512_BLOCK_SIZE);
 
 	/* copy the leftover back into the state struct */
 	if (leftover)
 		memcpy(sctx->buf, data, leftover);
-	sctx->count[0] = leftover;
+	sctx->count[0] += len;
+	memcpy(sctx->state, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
 out:
 	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
 	return rc;
@@ -150,39 +172,52 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out)
 	struct sha512_state *sctx = shash_desc_ctx(desc);
 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
 	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
-	struct nx_sg *in_sg, *out_sg;
-	u32 max_sg_len;
 	u64 count0;
 	unsigned long irq_flags;
 	int rc;
+	int len;
 
 	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
 
-	max_sg_len = min_t(u32, nx_driver.of.max_sg_len, nx_ctx->ap->sglen);
-
-	if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
+	/* final is represented by continuing the operation and indicating that
+	 * this is not an intermediate operation */
+	if (sctx->count[0] >= SHA512_BLOCK_SIZE) {
 		/* we've hit the nx chip previously, now we're finalizing,
 		 * so copy over the partial digest */
-		memcpy(csbcpb->cpb.sha512.input_partial_digest,
-		       csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
+		memcpy(csbcpb->cpb.sha512.input_partial_digest, sctx->state,
+							SHA512_DIGEST_SIZE);
+		NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
+	} else {
+		NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+		NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
 	}
 
-	/* final is represented by continuing the operation and indicating that
-	 * this is not an intermediate operation */
 	NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
 
 	count0 = sctx->count[0] * 8;
 
-	csbcpb->cpb.sha512.message_bit_length_lo += count0;
-	if (csbcpb->cpb.sha512.message_bit_length_lo < count0)
-		csbcpb->cpb.sha512.message_bit_length_hi++;
+	csbcpb->cpb.sha512.message_bit_length_lo = count0;
 
-	in_sg = nx_build_sg_list(nx_ctx->in_sg, sctx->buf, sctx->count[0],
-				 max_sg_len);
-	out_sg = nx_build_sg_list(nx_ctx->out_sg, out, SHA512_DIGEST_SIZE,
-				  max_sg_len);
-	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
-	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+	len = sctx->count[0] & (SHA512_BLOCK_SIZE - 1);
+	rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
+				  &nx_ctx->op.inlen,
+				  &len,
+				  (u8 *)sctx->buf,
+				  NX_DS_SHA512);
+
+	if (rc || len != (sctx->count[0] & (SHA512_BLOCK_SIZE - 1)))
+		goto out;
+
+	len = SHA512_DIGEST_SIZE;
+	rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg,
+				  &nx_ctx->op.outlen,
+				  &len,
+				  out,
+				  NX_DS_SHA512);
+
+	if (rc)
+		goto out;
 
 	if (!nx_ctx->op.outlen) {
 		rc = -EINVAL;
@@ -195,8 +230,7 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out)
 		goto out;
 
 	atomic_inc(&(nx_ctx->stats->sha512_ops));
-	atomic64_add(csbcpb->cpb.sha512.message_bit_length_lo / 8,
-		     &(nx_ctx->stats->sha512_bytes));
+	atomic64_add(sctx->count[0], &(nx_ctx->stats->sha512_bytes));
 
 	memcpy(out, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
 out:
@@ -207,74 +241,18 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out)
 static int nx_sha512_export(struct shash_desc *desc, void *out)
 {
 	struct sha512_state *sctx = shash_desc_ctx(desc);
-	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
-	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
-	struct sha512_state *octx = out;
-	unsigned long irq_flags;
 
-	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
+	memcpy(out, sctx, sizeof(*sctx));
 
-	/* move message_bit_length (128 bits) into count and convert its value
-	 * to bytes */
-	octx->count[0] = csbcpb->cpb.sha512.message_bit_length_lo >> 3 |
-			 ((csbcpb->cpb.sha512.message_bit_length_hi & 7) << 61);
-	octx->count[1] = csbcpb->cpb.sha512.message_bit_length_hi >> 3;
-
-	octx->count[0] += sctx->count[0];
-	if (octx->count[0] < sctx->count[0])
-		octx->count[1]++;
-
-	memcpy(octx->buf, sctx->buf, sizeof(octx->buf));
-
-	/* if no data has been processed yet, we need to export SHA512's
-	 * initial data, in case this context gets imported into a software
-	 * context */
-	if (csbcpb->cpb.sha512.message_bit_length_hi ||
-	    csbcpb->cpb.sha512.message_bit_length_lo)
-		memcpy(octx->state, csbcpb->cpb.sha512.message_digest,
-		       SHA512_DIGEST_SIZE);
-	else {
-		octx->state[0] = SHA512_H0;
-		octx->state[1] = SHA512_H1;
-		octx->state[2] = SHA512_H2;
-		octx->state[3] = SHA512_H3;
-		octx->state[4] = SHA512_H4;
-		octx->state[5] = SHA512_H5;
-		octx->state[6] = SHA512_H6;
-		octx->state[7] = SHA512_H7;
-	}
-
-	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
 	return 0;
 }
 
 static int nx_sha512_import(struct shash_desc *desc, const void *in)
 {
 	struct sha512_state *sctx = shash_desc_ctx(desc);
-	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
-	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
-	const struct sha512_state *ictx = in;
-	unsigned long irq_flags;
-
-	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
-
-	memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
-	sctx->count[0] = ictx->count[0] & 0x3f;
-	csbcpb->cpb.sha512.message_bit_length_lo = (ictx->count[0] & ~0x3f)
-							<< 3;
-	csbcpb->cpb.sha512.message_bit_length_hi = ictx->count[1] << 3 |
-						   ictx->count[0] >> 61;
-
-	if (csbcpb->cpb.sha512.message_bit_length_hi ||
-	    csbcpb->cpb.sha512.message_bit_length_lo) {
-		memcpy(csbcpb->cpb.sha512.message_digest, ictx->state,
-		       SHA512_DIGEST_SIZE);
 
-		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
-		NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
-	}
+	memcpy(sctx, in, sizeof(*sctx));
 
-	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
 	return 0;
 }
 
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c
index 5533fe31c90dff2acc68434b86a31d4497ad3553..a392465d3e3fc5ef38cde9c09b56bef87a4ba2e7 100644
--- a/drivers/crypto/nx/nx.c
+++ b/drivers/crypto/nx/nx.c
@@ -90,7 +90,7 @@ int nx_hcall_sync(struct nx_crypto_ctx *nx_ctx,
  */
 struct nx_sg *nx_build_sg_list(struct nx_sg *sg_head,
 			       u8           *start_addr,
-			       unsigned int  len,
+			       unsigned int *len,
 			       u32           sgmax)
 {
 	unsigned int sg_len = 0;
@@ -106,7 +106,7 @@ struct nx_sg *nx_build_sg_list(struct nx_sg *sg_head,
 	else
 		sg_addr = __pa(sg_addr);
 
-	end_addr = sg_addr + len;
+	end_addr = sg_addr + *len;
 
 	/* each iteration will write one struct nx_sg element and add the
 	 * length of data described by that element to sg_len. Once @len bytes
@@ -118,7 +118,7 @@ struct nx_sg *nx_build_sg_list(struct nx_sg *sg_head,
 	 * Also when using vmalloc'ed data, every time that a system page
 	 * boundary is crossed the physical address needs to be re-calculated.
 	 */
-	for (sg = sg_head; sg_len < len; sg++) {
+	for (sg = sg_head; sg_len < *len; sg++) {
 		u64 next_page;
 
 		sg->addr = sg_addr;
@@ -133,15 +133,17 @@ struct nx_sg *nx_build_sg_list(struct nx_sg *sg_head,
 				is_vmalloc_addr(start_addr + sg_len)) {
 			sg_addr = page_to_phys(vmalloc_to_page(
 						start_addr + sg_len));
-			end_addr = sg_addr + len - sg_len;
+			end_addr = sg_addr + *len - sg_len;
 		}
 
 		if ((sg - sg_head) == sgmax) {
 			pr_err("nx: scatter/gather list overflow, pid: %d\n",
 			       current->pid);
-			return NULL;
+			sg++;
+			break;
 		}
 	}
+	*len = sg_len;
 
 	/* return the moved sg_head pointer */
 	return sg;
@@ -160,11 +162,11 @@ struct nx_sg *nx_walk_and_build(struct nx_sg       *nx_dst,
 				unsigned int        sglen,
 				struct scatterlist *sg_src,
 				unsigned int        start,
-				unsigned int        src_len)
+				unsigned int       *src_len)
 {
 	struct scatter_walk walk;
 	struct nx_sg *nx_sg = nx_dst;
-	unsigned int n, offset = 0, len = src_len;
+	unsigned int n, offset = 0, len = *src_len;
 	char *dst;
 
 	/* we need to fast forward through @start bytes first */
@@ -182,26 +184,100 @@ struct nx_sg *nx_walk_and_build(struct nx_sg       *nx_dst,
 	 * element we're currently looking at */
 	scatterwalk_advance(&walk, start - offset);
 
-	while (len && nx_sg) {
+	while (len && (nx_sg - nx_dst) < sglen) {
 		n = scatterwalk_clamp(&walk, len);
 		if (!n) {
-			scatterwalk_start(&walk, sg_next(walk.sg));
+			/* In cases where we have a scatterlist chain,
+			 * scatterwalk_sg_next handles it properly */

+			scatterwalk_start(&walk, scatterwalk_sg_next(walk.sg));
 			n = scatterwalk_clamp(&walk, len);
 		}
 		dst = scatterwalk_map(&walk);
 
-		nx_sg = nx_build_sg_list(nx_sg, dst, n, sglen);
+		nx_sg = nx_build_sg_list(nx_sg, dst, &n, sglen - (nx_sg - nx_dst));
 		len -= n;
 
 		scatterwalk_unmap(dst);
 		scatterwalk_advance(&walk, n);
 		scatterwalk_done(&walk, SCATTERWALK_FROM_SG, len);
 	}
+	/* update to_process */
+	*src_len -= len;
 
 	/* return the moved destination pointer */
 	return nx_sg;
 }
 
+/**
+ * trim_sg_list - trim an sg list so that it stays within the given bound.
+ * @sg: sg list head
+ * @end: sg list end
+ * @delta: number of bytes to crop in order to bound the list.
+ *
+ */
+static long int trim_sg_list(struct nx_sg *sg, struct nx_sg *end, unsigned int delta)
+{
+	while (delta && end > sg) {
+		struct nx_sg *last = end - 1;
+
+		if (last->len > delta) {
+			last->len -= delta;
+			delta = 0;
+		} else {
+			end--;
+			delta -= last->len;
+		}
+	}
+	return (sg - end) * sizeof(struct nx_sg);
+}
+
+/**
+ * nx_sha_build_sg_list - walk and build an sg list for the SHA modes,
+ *			  respecting the hardware bounds and limits.
+ * @nx_ctx: NX crypto context for the lists we're building
+ * @nx_in_outsg: current in or out sg list to build
+ * @op_len: where the resulting op length is stored
+ * @nbytes: number of bytes to be processed
+ * @offset: data buffer to be described
+ * @mode: SHA256 or SHA512
+ */
+int nx_sha_build_sg_list(struct nx_crypto_ctx *nx_ctx,
+			  struct nx_sg 	      *nx_in_outsg,
+			  s64		      *op_len,
+			  unsigned int        *nbytes,
+			  u8 		      *offset,
+			  u32		      mode)
+{
+	unsigned int delta = 0;
+	unsigned int total = *nbytes;
+	struct nx_sg *nx_insg = nx_in_outsg;
+	unsigned int max_sg_len;
+
+	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
+			nx_driver.of.max_sg_len/sizeof(struct nx_sg));
+	max_sg_len = min_t(u64, max_sg_len,
+			nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+
+	*nbytes = min_t(u64, *nbytes, nx_ctx->ap->databytelen);
+	nx_insg = nx_build_sg_list(nx_insg, offset, nbytes, max_sg_len);
+
+	switch (mode) {
+	case NX_DS_SHA256:
+		if (*nbytes < total)
+			delta = *nbytes - (*nbytes & ~(SHA256_BLOCK_SIZE - 1));
+		break;
+	case NX_DS_SHA512:
+		if (*nbytes < total)
+			delta = *nbytes - (*nbytes & ~(SHA512_BLOCK_SIZE - 1));
+		break;
+	default:
+		return -EINVAL;
+	}
+	*op_len = trim_sg_list(nx_in_outsg, nx_insg, delta);
+
+	return 0;
+}
+
 /**
  * nx_build_sg_lists - walk the input scatterlists and build arrays of NX
  *                     scatterlists based on them.
@@ -223,26 +299,39 @@ int nx_build_sg_lists(struct nx_crypto_ctx  *nx_ctx,
 		      struct blkcipher_desc *desc,
 		      struct scatterlist    *dst,
 		      struct scatterlist    *src,
-		      unsigned int           nbytes,
+		      unsigned int          *nbytes,
 		      unsigned int           offset,
 		      u8                    *iv)
 {
+	unsigned int delta = 0;
+	unsigned int total = *nbytes;
 	struct nx_sg *nx_insg = nx_ctx->in_sg;
 	struct nx_sg *nx_outsg = nx_ctx->out_sg;
+	unsigned int max_sg_len;
+
+	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
+			nx_driver.of.max_sg_len/sizeof(struct nx_sg));
+	max_sg_len = min_t(u64, max_sg_len,
+			nx_ctx->ap->databytelen/NX_PAGE_SIZE);
 
 	if (iv)
 		memcpy(iv, desc->info, AES_BLOCK_SIZE);
 
-	nx_insg = nx_walk_and_build(nx_insg, nx_ctx->ap->sglen, src,
-				    offset, nbytes);
-	nx_outsg = nx_walk_and_build(nx_outsg, nx_ctx->ap->sglen, dst,
-				    offset, nbytes);
+	*nbytes = min_t(u64, *nbytes, nx_ctx->ap->databytelen);
+
+	nx_outsg = nx_walk_and_build(nx_outsg, max_sg_len, dst,
+					offset, nbytes);
+	nx_insg = nx_walk_and_build(nx_insg, max_sg_len, src,
+					offset, nbytes);
+
+	if (*nbytes < total)
+		delta = *nbytes - (*nbytes & ~(AES_BLOCK_SIZE - 1));
 
 	/* these lengths should be negative, which will indicate to phyp that
 	 * the input and output parameters are scatterlists, not linear
 	 * buffers */
-	nx_ctx->op.inlen = (nx_ctx->in_sg - nx_insg) * sizeof(struct nx_sg);
-	nx_ctx->op.outlen = (nx_ctx->out_sg - nx_outsg) * sizeof(struct nx_sg);
+	nx_ctx->op.inlen = trim_sg_list(nx_ctx->in_sg, nx_insg, delta);
+	nx_ctx->op.outlen = trim_sg_list(nx_ctx->out_sg, nx_outsg, delta);
 
 	return 0;
 }
@@ -540,10 +629,10 @@ static int nx_crypto_ctx_init(struct nx_crypto_ctx *nx_ctx, u32 fc, u32 mode)
 
 	/* we need an extra page for csbcpb_aead for these modes */
 	if (mode == NX_MODE_AES_GCM || mode == NX_MODE_AES_CCM)
-		nx_ctx->kmem_len = (4 * NX_PAGE_SIZE) +
+		nx_ctx->kmem_len = (5 * NX_PAGE_SIZE) +
 				   sizeof(struct nx_csbcpb);
 	else
-		nx_ctx->kmem_len = (3 * NX_PAGE_SIZE) +
+		nx_ctx->kmem_len = (4 * NX_PAGE_SIZE) +
 				   sizeof(struct nx_csbcpb);
 
 	nx_ctx->kmem = kmalloc(nx_ctx->kmem_len, GFP_KERNEL);
diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h
index befda07ca1da9e0978c59b9061cf79f2114bb9fb..6c9ecaaead52fdb390aa08f6af9759bdbde19b31 100644
--- a/drivers/crypto/nx/nx.h
+++ b/drivers/crypto/nx/nx.h
@@ -153,13 +153,15 @@ void nx_crypto_ctx_exit(struct crypto_tfm *tfm);
 void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function);
 int nx_hcall_sync(struct nx_crypto_ctx *ctx, struct vio_pfo_op *op,
 		  u32 may_sleep);
-struct nx_sg *nx_build_sg_list(struct nx_sg *, u8 *, unsigned int, u32);
+int nx_sha_build_sg_list(struct nx_crypto_ctx *, struct nx_sg *,
+			 s64 *, unsigned int *, u8 *, u32);
+struct nx_sg *nx_build_sg_list(struct nx_sg *, u8 *, unsigned int *, u32);
 int nx_build_sg_lists(struct nx_crypto_ctx *, struct blkcipher_desc *,
-		      struct scatterlist *, struct scatterlist *, unsigned int,
+		      struct scatterlist *, struct scatterlist *, unsigned int *,
 		      unsigned int, u8 *);
 struct nx_sg *nx_walk_and_build(struct nx_sg *, unsigned int,
 				struct scatterlist *, unsigned int,
-				unsigned int);
+				unsigned int *);
 
 #ifdef CONFIG_DEBUG_FS
 #define NX_DEBUGFS_INIT(drv)	nx_debugfs_init(drv)
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 633ba945e153da2a61f88f4580afab6caaff2c73..c178ed8c3908d3a92e55432aecb86e0e6e1501ee 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -563,4 +563,4 @@ MODULE_DESCRIPTION("VIA PadLock AES algorithm support");
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Michal Ludvig");
 
-MODULE_ALIAS("aes");
+MODULE_ALIAS_CRYPTO("aes");
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index bace885634f244b405113477010f2719351e997d..95f7d27ce491f000458a257e5dfa55a6105b433b 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -593,7 +593,7 @@ MODULE_DESCRIPTION("VIA PadLock SHA1/SHA256 algorithms support.");
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Michal Ludvig");
 
-MODULE_ALIAS("sha1-all");
-MODULE_ALIAS("sha256-all");
-MODULE_ALIAS("sha1-padlock");
-MODULE_ALIAS("sha256-padlock");
+MODULE_ALIAS_CRYPTO("sha1-all");
+MODULE_ALIAS_CRYPTO("sha256-all");
+MODULE_ALIAS_CRYPTO("sha1-padlock");
+MODULE_ALIAS_CRYPTO("sha256-padlock");
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
index fe7b3f06f6e62ac9ab1e35a9bfb6a2c3f267c35a..2ed425664a164b2988715f9b16b2abb53e25f7d6 100644
--- a/drivers/crypto/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h
@@ -56,8 +56,6 @@
 #define PCI_VENDOR_ID_INTEL 0x8086
 #define ADF_DH895XCC_DEVICE_NAME "dh895xcc"
 #define ADF_DH895XCC_PCI_DEVICE_ID 0x435
-#define ADF_DH895XCC_PMISC_BAR 1
-#define ADF_DH895XCC_ETR_BAR 2
 #define ADF_PCI_MAX_BARS 3
 #define ADF_DEVICE_NAME_LENGTH 32
 #define ADF_ETR_MAX_RINGS_PER_BANK 16
diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c
index c29d4c3926bfd0a80a59ef6334ea6cf71cdd059d..10ce4a2854ab543f43bd732b90f220818fa7c114 100644
--- a/drivers/crypto/qat/qat_common/adf_aer.c
+++ b/drivers/crypto/qat/qat_common/adf_aer.c
@@ -90,7 +90,7 @@ static void adf_dev_restore(struct adf_accel_dev *accel_dev)
 	uint16_t ppdstat = 0, bridge_ctl = 0;
 	int pending = 0;
 
-	pr_info("QAT: Reseting device qat_dev%d\n", accel_dev->accel_id);
+	pr_info("QAT: Resetting device qat_dev%d\n", accel_dev->accel_id);
 	pci_read_config_word(pdev, PPDSTAT_OFFSET, &ppdstat);
 	pending = ppdstat & PCI_EXP_DEVSTA_TRPND;
 	if (pending) {
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
index 244d73378f0e063e37392ede376cf44dd4b831bc..7ee93f881db698af823bbd5d94fe0a01abcd900f 100644
--- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c
+++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
@@ -52,6 +52,7 @@
 #include <linux/pci.h>
 #include <linux/cdev.h>
 #include <linux/uaccess.h>
+#include <linux/crypto.h>
 
 #include "adf_accel_devices.h"
 #include "adf_common_drv.h"
@@ -487,4 +488,4 @@ module_exit(adf_unregister_ctl_device_driver);
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Intel");
 MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
-MODULE_ALIAS("intel_qat");
+MODULE_ALIAS_CRYPTO("intel_qat");
diff --git a/drivers/crypto/qat/qat_common/adf_dev_mgr.c b/drivers/crypto/qat/qat_common/adf_dev_mgr.c
index ae71555c08683c7b196704f52a6245e025941fbd..4a0a829d45002673897235d4d50cf264d5014133 100644
--- a/drivers/crypto/qat/qat_common/adf_dev_mgr.c
+++ b/drivers/crypto/qat/qat_common/adf_dev_mgr.c
@@ -129,12 +129,13 @@ struct adf_accel_dev *adf_devmgr_get_first(void)
  * Function returns acceleration device associated with the given pci device.
  * To be used by QAT device specific drivers.
  *
- * Return: pinter to accel_dev or NULL if not found.
+ * Return: pointer to accel_dev or NULL if not found.
  */
 struct adf_accel_dev *adf_devmgr_pci_to_accel_dev(struct pci_dev *pci_dev)
 {
 	struct list_head *itr;
 
+	mutex_lock(&table_lock);
 	list_for_each(itr, &accel_table) {
 		struct adf_accel_dev *ptr =
 				list_entry(itr, struct adf_accel_dev, list);
@@ -144,6 +145,7 @@ struct adf_accel_dev *adf_devmgr_pci_to_accel_dev(struct pci_dev *pci_dev)
+			mutex_unlock(&table_lock);
 			return ptr;
 		}
 	}
+	mutex_unlock(&table_lock);
 	return NULL;
 }
 EXPORT_SYMBOL_GPL(adf_devmgr_pci_to_accel_dev);
@@ -152,6 +154,7 @@ struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id)
 {
 	struct list_head *itr;
 
+	mutex_lock(&table_lock);
 	list_for_each(itr, &accel_table) {
 		struct adf_accel_dev *ptr =
 				list_entry(itr, struct adf_accel_dev, list);
@@ -161,6 +164,7 @@ struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id)
+			mutex_unlock(&table_lock);
 			return ptr;
 		}
 	}
+	mutex_unlock(&table_lock);
 	return NULL;
 }
 
diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c
index 9dd2cb72a4e862e8203b9a0ffdc109ce4480c0be..7dd54aaee9fac1751b920ce5406d4b163e2d9c49 100644
--- a/drivers/crypto/qat/qat_common/adf_transport.c
+++ b/drivers/crypto/qat/qat_common/adf_transport.c
@@ -376,8 +376,9 @@ static inline int adf_get_cfg_int(struct adf_accel_dev *accel_dev,
 	return 0;
 }
 
-static void adf_enable_coalesc(struct adf_etr_bank_data *bank,
-			       const char *section, uint32_t bank_num_in_accel)
+static void adf_get_coalesc_timer(struct adf_etr_bank_data *bank,
+				  const char *section,
+				  uint32_t bank_num_in_accel)
 {
 	if (adf_get_cfg_int(bank->accel_dev, section,
 			    ADF_ETRMGR_COALESCE_TIMER_FORMAT,
@@ -396,7 +397,7 @@ static int adf_init_bank(struct adf_accel_dev *accel_dev,
 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
 	struct adf_etr_ring_data *ring;
 	struct adf_etr_ring_data *tx_ring;
-	uint32_t i, coalesc_enabled;
+	uint32_t i, coalesc_enabled = 0;
 
 	memset(bank, 0, sizeof(*bank));
 	bank->bank_number = bank_num;
@@ -407,10 +408,10 @@ static int adf_init_bank(struct adf_accel_dev *accel_dev,
 	/* Enable IRQ coalescing always. This will allow to use
 	 * the optimised flag and coalesc register.
 	 * If it is disabled in the config file just use min time value */
-	if (adf_get_cfg_int(accel_dev, "Accelerator0",
-			    ADF_ETRMGR_COALESCING_ENABLED_FORMAT,
-			    bank_num, &coalesc_enabled) && coalesc_enabled)
-		adf_enable_coalesc(bank, "Accelerator0", bank_num);
+	if ((adf_get_cfg_int(accel_dev, "Accelerator0",
+			     ADF_ETRMGR_COALESCING_ENABLED_FORMAT, bank_num,
+			     &coalesc_enabled) == 0) && coalesc_enabled)
+		adf_get_coalesc_timer(bank, "Accelerator0", bank_num);
 	else
 		bank->irq_coalesc_timer = ADF_COALESCING_MIN_TIME;
 
diff --git a/drivers/crypto/qat/qat_common/adf_transport_access_macros.h b/drivers/crypto/qat/qat_common/adf_transport_access_macros.h
index 91d88d67658045b1a6481f0e05965b2571672458..160c9a36c9198689d957c010e14930e3b3e8ffae 100644
--- a/drivers/crypto/qat/qat_common/adf_transport_access_macros.h
+++ b/drivers/crypto/qat/qat_common/adf_transport_access_macros.h
@@ -83,14 +83,14 @@
 #define ADF_MAX_RING_SIZE ADF_RING_SIZE_4M
 #define ADF_DEFAULT_RING_SIZE ADF_RING_SIZE_16K
 
-/* Valid internal msg size values internal */
+/* Valid internal msg size values */
 #define ADF_MSG_SIZE_32 0x01
 #define ADF_MSG_SIZE_64 0x02
 #define ADF_MSG_SIZE_128 0x04
 #define ADF_MIN_MSG_SIZE ADF_MSG_SIZE_32
 #define ADF_MAX_MSG_SIZE ADF_MSG_SIZE_128
 
-/* Size to bytes conversion macros for ring and msg values */
+/* Size to bytes conversion macros for ring and msg size values */
 #define ADF_MSG_SIZE_TO_BYTES(SIZE) (SIZE << 5)
 #define ADF_BYTES_TO_MSG_SIZE(SIZE) (SIZE >> 5)
 #define ADF_SIZE_TO_RING_SIZE_IN_BYTES(SIZE) ((1 << (SIZE - 1)) << 7)
@@ -100,8 +100,11 @@
 #define ADF_RING_SIZE_BYTES_MIN(SIZE) ((SIZE < ADF_RING_SIZE_4K) ? \
 				ADF_RING_SIZE_4K : SIZE)
 #define ADF_RING_SIZE_MODULO(SIZE) (SIZE + 0x6)
+#define ADF_SIZE_TO_POW(SIZE) ((((SIZE & 0x4) >> 1) | ((SIZE & 0x4) >> 2) | \
+				SIZE) & ~0x4)
+/* Max outstanding requests */
 #define ADF_MAX_INFLIGHTS(RING_SIZE, MSG_SIZE) \
-	((((1 << (RING_SIZE - 1)) << 4) >> MSG_SIZE) - 1)
+	((((1 << (RING_SIZE - 1)) << 3) >> ADF_SIZE_TO_POW(MSG_SIZE)) - 1)
 #define BUILD_RING_CONFIG(size)	\
 	((ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_FULL_WM) \
 	| (ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_EMPTY_WM) \
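As a hedged aside on the ADF_SIZE_TO_POW()/ADF_MAX_INFLIGHTS() change above (not part of the patch): the helper maps the message-size code to the exponent of the message size in bytes (msg_bytes == 1 << (pow + 4)), so the in-flight limit works out to ring_bytes / msg_bytes - 1. A compile-time self-check could look like the sketch below, assuming BUILD_BUG_ON() from <linux/bug.h> is visible in the including file:

static inline void adf_size_to_pow_example(void)
{
	BUILD_BUG_ON(ADF_SIZE_TO_POW(ADF_MSG_SIZE_32)  != 1);	/*  32 == 1 << 5 */
	BUILD_BUG_ON(ADF_SIZE_TO_POW(ADF_MSG_SIZE_64)  != 2);	/*  64 == 1 << 6 */
	BUILD_BUG_ON(ADF_SIZE_TO_POW(ADF_MSG_SIZE_128) != 3);	/* 128 == 1 << 7 */
}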
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 9e9619cd4a79b958dae8e6ceee84d0d267325322..19eea1c832ac5e9cb943a08ede29276a11bbb636 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -161,7 +161,7 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
 	__be64 *hash512_state_out;
 	int i, offset;
 
-	memset(auth_state.data, '\0', MAX_AUTH_STATE_SIZE + 64);
+	memzero_explicit(auth_state.data, MAX_AUTH_STATE_SIZE + 64);
 	shash->tfm = ctx->hash_tfm;
 	shash->flags = 0x0;
 
@@ -174,13 +174,13 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
 
 		memcpy(ipad, buff, digest_size);
 		memcpy(opad, buff, digest_size);
-		memset(ipad + digest_size, 0, block_size - digest_size);
-		memset(opad + digest_size, 0, block_size - digest_size);
+		memzero_explicit(ipad + digest_size, block_size - digest_size);
+		memzero_explicit(opad + digest_size, block_size - digest_size);
 	} else {
 		memcpy(ipad, auth_key, auth_keylen);
 		memcpy(opad, auth_key, auth_keylen);
-		memset(ipad + auth_keylen, 0, block_size - auth_keylen);
-		memset(opad + auth_keylen, 0, block_size - auth_keylen);
+		memzero_explicit(ipad + auth_keylen, block_size - auth_keylen);
+		memzero_explicit(opad + auth_keylen, block_size - auth_keylen);
 	}
 
 	for (i = 0; i < block_size; i++) {
@@ -254,6 +254,8 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
 	default:
 		return -EFAULT;
 	}
+	memzero_explicit(ipad, block_size);
+	memzero_explicit(opad, block_size);
 	return 0;
 }
 
@@ -466,7 +468,6 @@ static int qat_alg_init_sessions(struct qat_alg_session_ctx *ctx,
 		break;
 	default:
 		goto bad_key;
-		break;
 	}
 
 	if (qat_alg_init_enc_session(ctx, alg, &keys))
@@ -493,12 +494,12 @@ static int qat_alg_setkey(struct crypto_aead *tfm, const uint8_t *key,
 	if (ctx->enc_cd) {
 		/* rekeying */
 		dev = &GET_DEV(ctx->inst->accel_dev);
-		memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
-		memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
-		memset(&ctx->enc_fw_req_tmpl, 0,
-		       sizeof(struct icp_qat_fw_la_bulk_req));
-		memset(&ctx->dec_fw_req_tmpl, 0,
-		       sizeof(struct icp_qat_fw_la_bulk_req));
+		memzero_explicit(ctx->enc_cd, sizeof(struct qat_alg_cd));
+		memzero_explicit(ctx->dec_cd, sizeof(struct qat_alg_cd));
+		memzero_explicit(&ctx->enc_fw_req_tmpl,
+				 sizeof(struct icp_qat_fw_la_bulk_req));
+		memzero_explicit(&ctx->dec_fw_req_tmpl,
+				 sizeof(struct icp_qat_fw_la_bulk_req));
 	} else {
 		/* new key */
 		int node = get_current_node();
@@ -535,10 +536,12 @@ static int qat_alg_setkey(struct crypto_aead *tfm, const uint8_t *key,
 	return 0;
 
 out_free_all:
+	memzero_explicit(ctx->dec_cd, sizeof(struct qat_alg_cd));
 	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
 			  ctx->dec_cd, ctx->dec_cd_paddr);
 	ctx->dec_cd = NULL;
 out_free_enc:
+	memzero_explicit(ctx->enc_cd, sizeof(struct qat_alg_cd));
 	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
 			  ctx->enc_cd, ctx->enc_cd_paddr);
 	ctx->enc_cd = NULL;
@@ -836,7 +839,7 @@ static int qat_alg_init(struct crypto_tfm *tfm,
 {
 	struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
 
-	memset(ctx, '\0', sizeof(*ctx));
+	memzero_explicit(ctx, sizeof(*ctx));
 	ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
 	if (IS_ERR(ctx->hash_tfm))
 		return -EFAULT;
@@ -876,12 +879,16 @@ static void qat_alg_exit(struct crypto_tfm *tfm)
 		return;
 
 	dev = &GET_DEV(inst->accel_dev);
-	if (ctx->enc_cd)
+	if (ctx->enc_cd) {
+		memzero_explicit(ctx->enc_cd, sizeof(struct qat_alg_cd));
 		dma_free_coherent(dev, sizeof(struct qat_alg_cd),
 				  ctx->enc_cd, ctx->enc_cd_paddr);
-	if (ctx->dec_cd)
+	}
+	if (ctx->dec_cd) {
+		memzero_explicit(ctx->dec_cd, sizeof(struct qat_alg_cd));
 		dma_free_coherent(dev, sizeof(struct qat_alg_cd),
 				  ctx->dec_cd, ctx->dec_cd_paddr);
+	}
 	qat_crypto_put_instance(inst);
 }
 
diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c
index 9b8a31521ff3bdd0daacabfc2cfdd98c089469dc..b818c19713bfa8ea05904d008876d41013d39dc9 100644
--- a/drivers/crypto/qat/qat_common/qat_hal.c
+++ b/drivers/crypto/qat/qat_common/qat_hal.c
@@ -679,7 +679,8 @@ int qat_hal_init(struct adf_accel_dev *accel_dev)
 	struct icp_qat_fw_loader_handle *handle;
 	struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-	struct adf_bar *bar = &pci_info->pci_bars[ADF_DH895XCC_PMISC_BAR];
+	struct adf_bar *bar =
+			&pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)];
 
 	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
 	if (!handle)
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
index 65dd1ff93d3bf6b0ba5b0bf9cfb56fc5d15edcbc..01e0be21e93a8370cd3b799be54b99d851946d5e 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
@@ -48,6 +48,8 @@
 #define ADF_DH895x_HW_DATA_H_
 
 /* PCIe configuration space */
+#define ADF_DH895XCC_PMISC_BAR 1
+#define ADF_DH895XCC_ETR_BAR 2
 #define ADF_DH895XCC_RX_RINGS_OFFSET 8
 #define ADF_DH895XCC_TX_RINGS_MASK 0xFF
 #define ADF_DH895XCC_FUSECTL_OFFSET 0x40
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_isr.c b/drivers/crypto/qat/qat_dh895xcc/adf_isr.c
index d96ee21b9b77815f8ed018a78a36d43db7fc3c95..fe8f89697ad8c4c93d6e7df029a1fe8082c578b2 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_isr.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_isr.c
@@ -186,10 +186,8 @@ static int adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev)
 	accel_dev->accel_pci_dev.msix_entries.names = names;
 	return 0;
 err:
-	for (i = 0; i < msix_num_entries; i++) {
-		if (*(names + i))
-			kfree(*(names + i));
-	}
+	for (i = 0; i < msix_num_entries; i++)
+		kfree(*(names + i));
 	kfree(entries);
 	kfree(names);
 	return -ENOMEM;
@@ -203,10 +201,8 @@ static void adf_isr_free_msix_entry_table(struct adf_accel_dev *accel_dev)
 	int i;
 
 	kfree(accel_dev->accel_pci_dev.msix_entries.entries);
-	for (i = 0; i < msix_num_entries; i++) {
-		if (*(names + i))
-			kfree(*(names + i));
-	}
+	for (i = 0; i < msix_num_entries; i++)
+		kfree(*(names + i));
 	kfree(names);
 }
 
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 164e1ec624e3c7e2a9ca6234a8100f98cec8894d..579f539e5975dd99ad176cf8c5a144394bc41f22 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -3,6 +3,7 @@
  *
  * Support for SAHARA cryptographic accelerator.
  *
+ * Copyright (c) 2014 Steffen Trumtrar <s.trumtrar@pengutronix.de>
  * Copyright (c) 2013 Vista Silicon S.L.
  * Author: Javier Martin <javier.martin@vista-silicon.com>
  *
@@ -15,6 +16,10 @@
 
 #include <crypto/algapi.h>
 #include <crypto/aes.h>
+#include <crypto/hash.h>
+#include <crypto/internal/hash.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/sha.h>
 
 #include <linux/clk.h>
 #include <linux/crypto.h>
@@ -22,12 +27,19 @@
 #include <linux/io.h>
 #include <linux/irq.h>
 #include <linux/kernel.h>
+#include <linux/kthread.h>
 #include <linux/module.h>
+#include <linux/mutex.h>
 #include <linux/of.h>
+#include <linux/of_device.h>
 #include <linux/platform_device.h>
 
+#define SHA_BUFFER_LEN		PAGE_SIZE
+#define SAHARA_MAX_SHA_BLOCK_SIZE	SHA256_BLOCK_SIZE
+
 #define SAHARA_NAME "sahara"
 #define SAHARA_VERSION_3	3
+#define SAHARA_VERSION_4	4
 #define SAHARA_TIMEOUT_MS	1000
 #define SAHARA_MAX_HW_DESC	2
 #define SAHARA_MAX_HW_LINK	20
@@ -36,7 +48,6 @@
 #define FLAGS_ENCRYPT		BIT(0)
 #define FLAGS_CBC		BIT(1)
 #define FLAGS_NEW_KEY		BIT(3)
-#define FLAGS_BUSY		4
 
 #define SAHARA_HDR_BASE			0x00800000
 #define SAHARA_HDR_SKHA_ALG_AES	0
@@ -50,6 +61,23 @@
 #define SAHARA_HDR_CHA_MDHA		(2 << 28)
 #define SAHARA_HDR_PARITY_BIT		(1 << 31)
 
+#define SAHARA_HDR_MDHA_SET_MODE_MD_KEY	0x20880000
+#define SAHARA_HDR_MDHA_SET_MODE_HASH	0x208D0000
+#define SAHARA_HDR_MDHA_HASH		0xA0850000
+#define SAHARA_HDR_MDHA_STORE_DIGEST	0x20820000
+#define SAHARA_HDR_MDHA_ALG_SHA1	0
+#define SAHARA_HDR_MDHA_ALG_MD5		1
+#define SAHARA_HDR_MDHA_ALG_SHA256	2
+#define SAHARA_HDR_MDHA_ALG_SHA224	3
+#define SAHARA_HDR_MDHA_PDATA		(1 << 2)
+#define SAHARA_HDR_MDHA_HMAC		(1 << 3)
+#define SAHARA_HDR_MDHA_INIT		(1 << 5)
+#define SAHARA_HDR_MDHA_IPAD		(1 << 6)
+#define SAHARA_HDR_MDHA_OPAD		(1 << 7)
+#define SAHARA_HDR_MDHA_SWAP		(1 << 8)
+#define SAHARA_HDR_MDHA_MAC_FULL	(1 << 9)
+#define SAHARA_HDR_MDHA_SSL		(1 << 10)
+
 /* SAHARA can only process one request at a time */
 #define SAHARA_QUEUE_LENGTH	1
 
@@ -117,31 +145,74 @@ struct sahara_hw_link {
 };
 
 struct sahara_ctx {
-	struct sahara_dev *dev;
 	unsigned long flags;
+
+	/* AES-specific context */
 	int keylen;
 	u8 key[AES_KEYSIZE_128];
 	struct crypto_ablkcipher *fallback;
+
+	/* SHA-specific context */
+	struct crypto_shash *shash_fallback;
 };
 
 struct sahara_aes_reqctx {
 	unsigned long mode;
 };
 
+/*
+ * struct sahara_sha_reqctx - private data per request
+ * @buf: holds data for requests smaller than block_size
+ * @rembuf: used to prepare one block_size-aligned request
+ * @context: hw-specific context for request. Digest is extracted from this
+ * @mutex: serializes enqueueing of requests using this context
+ * @mode: specifies what type of hw-descriptor needs to be built
+ * @digest_size: length of digest for this request
+ * @context_size: length of hw-context for this request.
+ *                Always digest_size + 4
+ * @buf_cnt: number of bytes saved in buf
+ * @sg_in_idx: number of hw links
+ * @in_sg: scatterlist for input data
+ * @in_sg_chain: scatterlists for chained input data
+ * @in_sg_chained: specifies if chained scatterlists are used or not
+ * @total: total number of bytes for transfer
+ * @last: is this the last block
+ * @first: is this the first block
+ * @active: inside a transfer
+ */
+struct sahara_sha_reqctx {
+	u8			buf[SAHARA_MAX_SHA_BLOCK_SIZE];
+	u8			rembuf[SAHARA_MAX_SHA_BLOCK_SIZE];
+	u8			context[SHA256_DIGEST_SIZE + 4];
+	struct mutex		mutex;
+	unsigned int		mode;
+	unsigned int		digest_size;
+	unsigned int		context_size;
+	unsigned int		buf_cnt;
+	unsigned int		sg_in_idx;
+	struct scatterlist	*in_sg;
+	struct scatterlist	in_sg_chain[2];
+	bool			in_sg_chained;
+	size_t			total;
+	unsigned int		last;
+	unsigned int		first;
+	unsigned int		active;
+};
+
 struct sahara_dev {
 	struct device		*device;
+	unsigned int		version;
 	void __iomem		*regs_base;
 	struct clk		*clk_ipg;
 	struct clk		*clk_ahb;
+	struct mutex		queue_mutex;
+	struct task_struct	*kthread;
+	struct completion	dma_completion;
 
 	struct sahara_ctx	*ctx;
 	spinlock_t		lock;
 	struct crypto_queue	queue;
 	unsigned long		flags;
 
-	struct tasklet_struct	done_task;
-	struct tasklet_struct	queue_task;
-
 	struct sahara_hw_desc	*hw_desc[SAHARA_MAX_HW_DESC];
 	dma_addr_t		hw_phys_desc[SAHARA_MAX_HW_DESC];
 
@@ -151,10 +222,12 @@ struct sahara_dev {
 	u8			*iv_base;
 	dma_addr_t		iv_phys_base;
 
+	u8			*context_base;
+	dma_addr_t		context_phys_base;
+
 	struct sahara_hw_link	*hw_link[SAHARA_MAX_HW_LINK];
 	dma_addr_t		hw_phys_link[SAHARA_MAX_HW_LINK];
 
-	struct ablkcipher_request *req;
 	size_t			total;
 	struct scatterlist	*in_sg;
 	unsigned int		nb_in_sg;
@@ -162,7 +235,6 @@ struct sahara_dev {
 	unsigned int		nb_out_sg;
 
 	u32			error;
-	struct timer_list	watchdog;
 };
 
 static struct sahara_dev *dev_ptr;
@@ -401,34 +473,6 @@ static void sahara_dump_links(struct sahara_dev *dev)
 	dev_dbg(dev->device, "\n");
 }
 
-static void sahara_aes_done_task(unsigned long data)
-{
-	struct sahara_dev *dev = (struct sahara_dev *)data;
-
-	dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
-		DMA_TO_DEVICE);
-	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
-		DMA_FROM_DEVICE);
-
-	spin_lock(&dev->lock);
-	clear_bit(FLAGS_BUSY, &dev->flags);
-	spin_unlock(&dev->lock);
-
-	dev->req->base.complete(&dev->req->base, dev->error);
-}
-
-static void sahara_watchdog(unsigned long data)
-{
-	struct sahara_dev *dev = (struct sahara_dev *)data;
-	unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);
-	unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);
-
-	sahara_decode_status(dev, stat);
-	sahara_decode_error(dev, err);
-	dev->error = -ETIMEDOUT;
-	sahara_aes_done_task(data);
-}
-
 static int sahara_hw_descriptor_create(struct sahara_dev *dev)
 {
 	struct sahara_ctx *ctx = dev->ctx;
@@ -512,9 +556,6 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev)
 	sahara_dump_descriptors(dev);
 	sahara_dump_links(dev);
 
-	/* Start processing descriptor chain. */
-	mod_timer(&dev->watchdog,
-		  jiffies + msecs_to_jiffies(SAHARA_TIMEOUT_MS));
 	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);
 
 	return 0;
@@ -529,37 +570,19 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev)
 	return -EINVAL;
 }
 
-static void sahara_aes_queue_task(unsigned long data)
+static int sahara_aes_process(struct ablkcipher_request *req)
 {
-	struct sahara_dev *dev = (struct sahara_dev *)data;
-	struct crypto_async_request *async_req, *backlog;
+	struct sahara_dev *dev = dev_ptr;
 	struct sahara_ctx *ctx;
 	struct sahara_aes_reqctx *rctx;
-	struct ablkcipher_request *req;
 	int ret;
 
-	spin_lock(&dev->lock);
-	backlog = crypto_get_backlog(&dev->queue);
-	async_req = crypto_dequeue_request(&dev->queue);
-	if (!async_req)
-		clear_bit(FLAGS_BUSY, &dev->flags);
-	spin_unlock(&dev->lock);
-
-	if (!async_req)
-		return;
-
-	if (backlog)
-		backlog->complete(backlog, -EINPROGRESS);
-
-	req = ablkcipher_request_cast(async_req);
-
 	/* Request is ready to be dispatched by the device */
 	dev_dbg(dev->device,
 		"dispatch request (nbytes=%d, src=%p, dst=%p)\n",
 		req->nbytes, req->src, req->dst);
 
 	/* assign new request to device */
-	dev->req = req;
 	dev->total = req->nbytes;
 	dev->in_sg = req->src;
 	dev->out_sg = req->dst;
@@ -573,16 +596,25 @@ static void sahara_aes_queue_task(unsigned long data)
 		memcpy(dev->iv_base, req->info, AES_KEYSIZE_128);
 
 	/* assign new context to device */
-	ctx->dev = dev;
 	dev->ctx = ctx;
 
+	reinit_completion(&dev->dma_completion);
+
 	ret = sahara_hw_descriptor_create(dev);
-	if (ret < 0) {
-		spin_lock(&dev->lock);
-		clear_bit(FLAGS_BUSY, &dev->flags);
-		spin_unlock(&dev->lock);
-		dev->req->base.complete(&dev->req->base, ret);
+	if (ret)
+		return ret;
+
+	ret = wait_for_completion_timeout(&dev->dma_completion,
+				msecs_to_jiffies(SAHARA_TIMEOUT_MS));
+	if (!ret) {
+		dev_err(dev->device, "AES timeout\n");
+		return -ETIMEDOUT;
 	}
+
+	dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
+		DMA_TO_DEVICE);
+	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
+		DMA_FROM_DEVICE);
+
+	return 0;
 }
 
 static int sahara_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
@@ -624,12 +656,9 @@ static int sahara_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
 
 static int sahara_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
 {
-	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
-		crypto_ablkcipher_reqtfm(req));
 	struct sahara_aes_reqctx *rctx = ablkcipher_request_ctx(req);
 	struct sahara_dev *dev = dev_ptr;
 	int err = 0;
-	int busy;
 
 	dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
 		req->nbytes, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));
@@ -640,16 +669,13 @@ static int sahara_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
 		return -EINVAL;
 	}
 
-	ctx->dev = dev;
-
 	rctx->mode = mode;
-	spin_lock_bh(&dev->lock);
+
+	mutex_lock(&dev->queue_mutex);
 	err = ablkcipher_enqueue_request(&dev->queue, req);
-	busy = test_and_set_bit(FLAGS_BUSY, &dev->flags);
-	spin_unlock_bh(&dev->lock);
+	mutex_unlock(&dev->queue_mutex);
 
-	if (!busy)
-		tasklet_schedule(&dev->queue_task);
+	wake_up_process(dev->kthread);
 
 	return err;
 }
@@ -752,6 +778,484 @@ static void sahara_aes_cra_exit(struct crypto_tfm *tfm)
 	ctx->fallback = NULL;
 }
 
+static u32 sahara_sha_init_hdr(struct sahara_dev *dev,
+			      struct sahara_sha_reqctx *rctx)
+{
+	u32 hdr = 0;
+
+	hdr = rctx->mode;
+
+	if (rctx->first) {
+		hdr |= SAHARA_HDR_MDHA_SET_MODE_HASH;
+		hdr |= SAHARA_HDR_MDHA_INIT;
+	} else {
+		hdr |= SAHARA_HDR_MDHA_SET_MODE_MD_KEY;
+	}
+
+	if (rctx->last)
+		hdr |= SAHARA_HDR_MDHA_PDATA;
+
+	if (hweight_long(hdr) % 2 == 0)
+		hdr |= SAHARA_HDR_PARITY_BIT;
+
+	return hdr;
+}
+
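A short illustrative note on the helper above (not part of the patch):

/*
 * If the assembled hdr has an even popcount, say hweight_long(hdr) == 6,
 * bit 31 (SAHARA_HDR_PARITY_BIT) is ORed in and the final header carries
 * 7 set bits; the conditional above therefore always produces a header
 * with odd parity.
 */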
+static int sahara_sha_hw_links_create(struct sahara_dev *dev,
+				       struct sahara_sha_reqctx *rctx,
+				       int start)
+{
+	struct scatterlist *sg;
+	unsigned int i;
+	int ret;
+
+	dev->in_sg = rctx->in_sg;
+
+	dev->nb_in_sg = sahara_sg_length(dev->in_sg, rctx->total);
+	if ((dev->nb_in_sg) > SAHARA_MAX_HW_LINK) {
+		dev_err(dev->device, "not enough hw links (%d)\n",
+			dev->nb_in_sg + dev->nb_out_sg);
+		return -EINVAL;
+	}
+
+	if (rctx->in_sg_chained) {
+		i = start;
+		sg = dev->in_sg;
+		while (sg) {
+			ret = dma_map_sg(dev->device, sg, 1,
+					 DMA_TO_DEVICE);
+			if (!ret)
+				return -EFAULT;
+
+			dev->hw_link[i]->len = sg->length;
+			dev->hw_link[i]->p = sg->dma_address;
+			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
+			sg = sg_next(sg);
+			i += 1;
+		}
+		dev->hw_link[i-1]->next = 0;
+	} else {
+		sg = dev->in_sg;
+		ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
+				 DMA_TO_DEVICE);
+		if (!ret)
+			return -EFAULT;
+
+		for (i = start; i < dev->nb_in_sg + start; i++) {
+			dev->hw_link[i]->len = sg->length;
+			dev->hw_link[i]->p = sg->dma_address;
+			if (i == (dev->nb_in_sg + start - 1)) {
+				dev->hw_link[i]->next = 0;
+			} else {
+				dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
+				sg = sg_next(sg);
+			}
+		}
+	}
+
+	return i;
+}
+
+static int sahara_sha_hw_data_descriptor_create(struct sahara_dev *dev,
+						struct sahara_sha_reqctx *rctx,
+						struct ahash_request *req,
+						int index)
+{
+	unsigned result_len;
+	int i = index;
+
+	if (rctx->first)
+		/* Create initial descriptor: #8 */
+		dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
+	else
+		/* Create hash descriptor: #10. Must follow #6. */
+		dev->hw_desc[index]->hdr = SAHARA_HDR_MDHA_HASH;
+
+	dev->hw_desc[index]->len1 = rctx->total;
+	if (dev->hw_desc[index]->len1 == 0) {
+		/* if len1 is 0, p1 must be 0, too */
+		dev->hw_desc[index]->p1 = 0;
+		rctx->sg_in_idx = 0;
+	} else {
+		/* Create input links */
+		dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
+		i = sahara_sha_hw_links_create(dev, rctx, index);
+
+		rctx->sg_in_idx = index;
+		if (i < 0)
+			return i;
+	}
+
+	dev->hw_desc[index]->p2 = dev->hw_phys_link[i];
+
+	/* Save the context for the next operation */
+	result_len = rctx->context_size;
+	dev->hw_link[i]->p = dev->context_phys_base;
+
+	dev->hw_link[i]->len = result_len;
+	dev->hw_desc[index]->len2 = result_len;
+
+	dev->hw_link[i]->next = 0;
+
+	return 0;
+}
+
+/*
+ * Load descriptor aka #6
+ *
+ * To load a previously saved context back to the MDHA unit
+ *
+ * p1: Saved Context
+ * p2: NULL
+ *
+ */
+static int sahara_sha_hw_context_descriptor_create(struct sahara_dev *dev,
+						struct sahara_sha_reqctx *rctx,
+						struct ahash_request *req,
+						int index)
+{
+	dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
+
+	dev->hw_desc[index]->len1 = rctx->context_size;
+	dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
+	dev->hw_desc[index]->len2 = 0;
+	dev->hw_desc[index]->p2 = 0;
+
+	dev->hw_link[index]->len = rctx->context_size;
+	dev->hw_link[index]->p = dev->context_phys_base;
+	dev->hw_link[index]->next = 0;
+
+	return 0;
+}
+
+static int sahara_walk_and_recalc(struct scatterlist *sg, unsigned int nbytes)
+{
+	if (!sg || !sg->length)
+		return nbytes;
+
+	while (nbytes && sg) {
+		if (nbytes <= sg->length) {
+			sg->length = nbytes;
+			sg_mark_end(sg);
+			break;
+		}
+		nbytes -= sg->length;
+		sg = scatterwalk_sg_next(sg);
+	}
+
+	return nbytes;
+}
+
+static int sahara_sha_prepare_request(struct ahash_request *req)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
+	unsigned int hash_later;
+	unsigned int block_size;
+	unsigned int len;
+
+	block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
+
+	/* append bytes from previous operation */
+	len = rctx->buf_cnt + req->nbytes;
+
+	/* only the last transfer can be padded in hardware */
+	if (!rctx->last && (len < block_size)) {
+		/* too little data, save it for the next operation */
+		scatterwalk_map_and_copy(rctx->buf + rctx->buf_cnt, req->src,
+					 0, req->nbytes, 0);
+		rctx->buf_cnt += req->nbytes;
+
+		return 0;
+	}
+
+	/* add data from previous operation first */
+	if (rctx->buf_cnt)
+		memcpy(rctx->rembuf, rctx->buf, rctx->buf_cnt);
+
+	/* data must always be a multiple of block_size */
+	hash_later = rctx->last ? 0 : len & (block_size - 1);
+	if (hash_later) {
+		unsigned int offset = req->nbytes - hash_later;
+		/* Save remaining bytes for later use */
+		scatterwalk_map_and_copy(rctx->buf, req->src, offset,
+					hash_later, 0);
+	}
+
+	/* nbytes should now be a multiple of block_size */
+	req->nbytes = req->nbytes - hash_later;
+
+	sahara_walk_and_recalc(req->src, req->nbytes);
+
+	/* have data from previous operation and current */
+	if (rctx->buf_cnt && req->nbytes) {
+		sg_init_table(rctx->in_sg_chain, 2);
+		sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt);
+
+		scatterwalk_sg_chain(rctx->in_sg_chain, 2, req->src);
+
+		rctx->total = req->nbytes + rctx->buf_cnt;
+		rctx->in_sg = rctx->in_sg_chain;
+
+		rctx->in_sg_chained = true;
+		req->src = rctx->in_sg_chain;
+	/* only data from previous operation */
+	} else if (rctx->buf_cnt) {
+		if (req->src)
+			rctx->in_sg = req->src;
+		else
+			rctx->in_sg = rctx->in_sg_chain;
+		/* buf was copied into rembuf above */
+		sg_init_one(rctx->in_sg, rctx->rembuf, rctx->buf_cnt);
+		rctx->total = rctx->buf_cnt;
+		rctx->in_sg_chained = false;
+	/* no data from previous operation */
+	} else {
+		rctx->in_sg = req->src;
+		rctx->total = req->nbytes;
+		req->src = rctx->in_sg;
+		rctx->in_sg_chained = false;
+	}
+
+	/* on next call, we only have the remaining data in the buffer */
+	rctx->buf_cnt = hash_later;
+
+	return -EINPROGRESS;
+}
+
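A worked example of the buffering above (illustrative only, not part of the patch), using the SHA-256 block size of 64 bytes:

/*
 * Say a previous update left rctx->buf_cnt = 10 bytes in rctx->buf and the
 * current, non-final request carries req->nbytes = 100:
 *
 *   len         = 10 + 100 = 110
 *   hash_later  = 110 & 63 =  46   (saved in rctx->buf for the next call)
 *   req->nbytes = 100 - 46 =  54
 *   rctx->total =  54 + 10 =  64   (rembuf chained in front of req->src)
 *
 * so exactly one complete block is handed to the hardware this round.
 */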
+static void sahara_sha_unmap_sg(struct sahara_dev *dev,
+				struct sahara_sha_reqctx *rctx)
+{
+	struct scatterlist *sg;
+
+	if (rctx->in_sg_chained) {
+		sg = dev->in_sg;
+		while (sg) {
+			dma_unmap_sg(dev->device, sg, 1, DMA_TO_DEVICE);
+			sg = sg_next(sg);
+		}
+	} else {
+		dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
+			DMA_TO_DEVICE);
+	}
+}
+
+static int sahara_sha_process(struct ahash_request *req)
+{
+	struct sahara_dev *dev = dev_ptr;
+	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
+	int ret = -EINPROGRESS;
+
+	ret = sahara_sha_prepare_request(req);
+	if (!ret)
+		return ret;
+
+	if (rctx->first) {
+		sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0);
+		dev->hw_desc[0]->next = 0;
+		rctx->first = 0;
+	} else {
+		memcpy(dev->context_base, rctx->context, rctx->context_size);
+
+		sahara_sha_hw_context_descriptor_create(dev, rctx, req, 0);
+		dev->hw_desc[0]->next = dev->hw_phys_desc[1];
+		sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1);
+		dev->hw_desc[1]->next = 0;
+	}
+
+	sahara_dump_descriptors(dev);
+	sahara_dump_links(dev);
+
+	reinit_completion(&dev->dma_completion);
+
+	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);
+
+	ret = wait_for_completion_timeout(&dev->dma_completion,
+				msecs_to_jiffies(SAHARA_TIMEOUT_MS));
+	if (!ret) {
+		dev_err(dev->device, "SHA timeout\n");
+		return -ETIMEDOUT;
+	}
+
+	if (rctx->sg_in_idx)
+		sahara_sha_unmap_sg(dev, rctx);
+
+	memcpy(rctx->context, dev->context_base, rctx->context_size);
+
+	if (req->result)
+		memcpy(req->result, rctx->context, rctx->digest_size);
+
+	return 0;
+}
+
+static int sahara_queue_manage(void *data)
+{
+	struct sahara_dev *dev = (struct sahara_dev *)data;
+	struct crypto_async_request *async_req;
+	int ret = 0;
+
+	do {
+		__set_current_state(TASK_INTERRUPTIBLE);
+
+		mutex_lock(&dev->queue_mutex);
+		async_req = crypto_dequeue_request(&dev->queue);
+		mutex_unlock(&dev->queue_mutex);
+
+		if (async_req) {
+			if (crypto_tfm_alg_type(async_req->tfm) ==
+			    CRYPTO_ALG_TYPE_AHASH) {
+				struct ahash_request *req =
+					ahash_request_cast(async_req);
+
+				ret = sahara_sha_process(req);
+			} else {
+				struct ablkcipher_request *req =
+					ablkcipher_request_cast(async_req);
+
+				ret = sahara_aes_process(req);
+			}
+
+			async_req->complete(async_req, ret);
+
+			continue;
+		}
+
+		schedule();
+	} while (!kthread_should_stop());
+
+	return 0;
+}
+
+static int sahara_sha_enqueue(struct ahash_request *req, int last)
+{
+	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
+	struct sahara_dev *dev = dev_ptr;
+	int ret;
+
+	if (!req->nbytes && !last)
+		return 0;
+
+	mutex_lock(&rctx->mutex);
+	rctx->last = last;
+
+	if (!rctx->active) {
+		rctx->active = 1;
+		rctx->first = 1;
+	}
+
+	mutex_lock(&dev->queue_mutex);
+	ret = crypto_enqueue_request(&dev->queue, &req->base);
+	mutex_unlock(&dev->queue_mutex);
+
+	wake_up_process(dev->kthread);
+	mutex_unlock(&rctx->mutex);
+
+	return ret;
+}
+
+static int sahara_sha_init(struct ahash_request *req)
+{
+	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
+
+	memset(rctx, 0, sizeof(*rctx));
+
+	switch (crypto_ahash_digestsize(tfm)) {
+	case SHA1_DIGEST_SIZE:
+		rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA1;
+		rctx->digest_size = SHA1_DIGEST_SIZE;
+		break;
+	case SHA256_DIGEST_SIZE:
+		rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA256;
+		rctx->digest_size = SHA256_DIGEST_SIZE;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	rctx->context_size = rctx->digest_size + 4;
+	rctx->active = 0;
+
+	mutex_init(&rctx->mutex);
+
+	return 0;
+}
+
+static int sahara_sha_update(struct ahash_request *req)
+{
+	return sahara_sha_enqueue(req, 0);
+}
+
+static int sahara_sha_final(struct ahash_request *req)
+{
+	req->nbytes = 0;
+	return sahara_sha_enqueue(req, 1);
+}
+
+static int sahara_sha_finup(struct ahash_request *req)
+{
+	return sahara_sha_enqueue(req, 1);
+}
+
+static int sahara_sha_digest(struct ahash_request *req)
+{
+	sahara_sha_init(req);
+
+	return sahara_sha_finup(req);
+}
+
+static int sahara_sha_export(struct ahash_request *req, void *out)
+{
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	struct sahara_ctx *ctx = crypto_ahash_ctx(ahash);
+	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
+
+	memcpy(out, ctx, sizeof(struct sahara_ctx));
+	memcpy(out + sizeof(struct sahara_sha_reqctx), rctx,
+	       sizeof(struct sahara_sha_reqctx));
+
+	return 0;
+}
+
+static int sahara_sha_import(struct ahash_request *req, const void *in)
+{
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	struct sahara_ctx *ctx = crypto_ahash_ctx(ahash);
+	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
+
+	memcpy(ctx, in, sizeof(struct sahara_ctx));
+	memcpy(rctx, in + sizeof(struct sahara_sha_reqctx),
+	       sizeof(struct sahara_sha_reqctx));
+
+	return 0;
+}
+
+static int sahara_sha_cra_init(struct crypto_tfm *tfm)
+{
+	const char *name = crypto_tfm_alg_name(tfm);
+	struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	ctx->shash_fallback = crypto_alloc_shash(name, 0,
+					CRYPTO_ALG_NEED_FALLBACK);
+	if (IS_ERR(ctx->shash_fallback)) {
+		pr_err("Error allocating fallback algo %s\n", name);
+		return PTR_ERR(ctx->shash_fallback);
+	}
+	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+				 sizeof(struct sahara_sha_reqctx) +
+				 SHA_BUFFER_LEN + SHA256_BLOCK_SIZE);
+
+	return 0;
+}
+
+static void sahara_sha_cra_exit(struct crypto_tfm *tfm)
+{
+	struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	crypto_free_shash(ctx->shash_fallback);
+	ctx->shash_fallback = NULL;
+}
+
 static struct crypto_alg aes_algs[] = {
 {
 	.cra_name		= "ecb(aes)",
@@ -797,14 +1301,66 @@ static struct crypto_alg aes_algs[] = {
 }
 };
 
+static struct ahash_alg sha_v3_algs[] = {
+{
+	.init		= sahara_sha_init,
+	.update		= sahara_sha_update,
+	.final		= sahara_sha_final,
+	.finup		= sahara_sha_finup,
+	.digest		= sahara_sha_digest,
+	.export		= sahara_sha_export,
+	.import		= sahara_sha_import,
+	.halg.digestsize	= SHA1_DIGEST_SIZE,
+	.halg.base	= {
+		.cra_name		= "sha1",
+		.cra_driver_name	= "sahara-sha1",
+		.cra_priority		= 300,
+		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
+						CRYPTO_ALG_ASYNC |
+						CRYPTO_ALG_NEED_FALLBACK,
+		.cra_blocksize		= SHA1_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct sahara_ctx),
+		.cra_alignmask		= 0,
+		.cra_module		= THIS_MODULE,
+		.cra_init		= sahara_sha_cra_init,
+		.cra_exit		= sahara_sha_cra_exit,
+	}
+},
+};
+
+static struct ahash_alg sha_v4_algs[] = {
+{
+	.init		= sahara_sha_init,
+	.update		= sahara_sha_update,
+	.final		= sahara_sha_final,
+	.finup		= sahara_sha_finup,
+	.digest		= sahara_sha_digest,
+	.export		= sahara_sha_export,
+	.import		= sahara_sha_import,
+	.halg.digestsize	= SHA256_DIGEST_SIZE,
+	.halg.base	= {
+		.cra_name		= "sha256",
+		.cra_driver_name	= "sahara-sha256",
+		.cra_priority		= 300,
+		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
+						CRYPTO_ALG_ASYNC |
+						CRYPTO_ALG_NEED_FALLBACK,
+		.cra_blocksize		= SHA256_BLOCK_SIZE,
+		.cra_ctxsize		= sizeof(struct sahara_ctx),
+		.cra_alignmask		= 0,
+		.cra_module		= THIS_MODULE,
+		.cra_init		= sahara_sha_cra_init,
+		.cra_exit		= sahara_sha_cra_exit,
+	}
+},
+};
+
 static irqreturn_t sahara_irq_handler(int irq, void *data)
 {
 	struct sahara_dev *dev = (struct sahara_dev *)data;
 	unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);
 	unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);
 
-	del_timer(&dev->watchdog);
-
 	sahara_write(dev, SAHARA_CMD_CLEAR_INT | SAHARA_CMD_CLEAR_ERR,
 		     SAHARA_REG_CMD);
 
@@ -819,7 +1375,7 @@ static irqreturn_t sahara_irq_handler(int irq, void *data)
 		dev->error = -EINVAL;
 	}
 
-	tasklet_schedule(&dev->done_task);
+	complete(&dev->dma_completion);
 
 	return IRQ_HANDLED;
 }
@@ -827,7 +1383,8 @@ static irqreturn_t sahara_irq_handler(int irq, void *data)
 
 static int sahara_register_algs(struct sahara_dev *dev)
 {
-	int err, i, j;
+	int err;
+	unsigned int i, j, k, l;
 
 	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
 		INIT_LIST_HEAD(&aes_algs[i].cra_list);
@@ -836,8 +1393,29 @@ static int sahara_register_algs(struct sahara_dev *dev)
 			goto err_aes_algs;
 	}
 
+	for (k = 0; k < ARRAY_SIZE(sha_v3_algs); k++) {
+		err = crypto_register_ahash(&sha_v3_algs[k]);
+		if (err)
+			goto err_sha_v3_algs;
+	}
+
+	if (dev->version > SAHARA_VERSION_3)
+		for (l = 0; l < ARRAY_SIZE(sha_v4_algs); l++) {
+			err = crypto_register_ahash(&sha_v4_algs[l]);
+			if (err)
+				goto err_sha_v4_algs;
+		}
+
 	return 0;
 
+err_sha_v4_algs:
+	for (j = 0; j < l; j++)
+		crypto_unregister_ahash(&sha_v4_algs[j]);
+
+err_sha_v3_algs:
+	for (j = 0; j < k; j++)
+		crypto_unregister_ahash(&sha_v3_algs[j]);
+
 err_aes_algs:
 	for (j = 0; j < i; j++)
 		crypto_unregister_alg(&aes_algs[j]);
@@ -847,10 +1425,17 @@ static int sahara_register_algs(struct sahara_dev *dev)
 
 static void sahara_unregister_algs(struct sahara_dev *dev)
 {
-	int i;
+	unsigned int i;
 
 	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
 		crypto_unregister_alg(&aes_algs[i]);
+
+	for (i = 0; i < ARRAY_SIZE(sha_v3_algs); i++)
+		crypto_unregister_ahash(&sha_v3_algs[i]);
+
+	if (dev->version > SAHARA_VERSION_3)
+		for (i = 0; i < ARRAY_SIZE(sha_v4_algs); i++)
+			crypto_unregister_ahash(&sha_v4_algs[i]);
 }
 
 static struct platform_device_id sahara_platform_ids[] = {
@@ -860,6 +1445,7 @@ static struct platform_device_id sahara_platform_ids[] = {
 MODULE_DEVICE_TABLE(platform, sahara_platform_ids);
 
 static struct of_device_id sahara_dt_ids[] = {
+	{ .compatible = "fsl,imx53-sahara" },
 	{ .compatible = "fsl,imx27-sahara" },
 	{ /* sentinel */ }
 };
@@ -939,6 +1525,16 @@ static int sahara_probe(struct platform_device *pdev)
 	dev->iv_base = dev->key_base + AES_KEYSIZE_128;
 	dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128;
 
+	/* Allocate space for context: largest digest + message length field */
+	dev->context_base = dma_alloc_coherent(&pdev->dev,
+					SHA256_DIGEST_SIZE + 4,
+					&dev->context_phys_base, GFP_KERNEL);
+	if (!dev->context_base) {
+		dev_err(&pdev->dev, "Could not allocate memory for MDHA context\n");
+		err = -ENOMEM;
+		goto err_key;
+	}
+
 	/* Allocate space for HW links */
 	dev->hw_link[0] = dma_alloc_coherent(&pdev->dev,
 			SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
@@ -956,28 +1552,40 @@ static int sahara_probe(struct platform_device *pdev)
 
 	crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH);
 
+	spin_lock_init(&dev->lock);
+	mutex_init(&dev->queue_mutex);
+
 	dev_ptr = dev;
 
-	tasklet_init(&dev->queue_task, sahara_aes_queue_task,
-		     (unsigned long)dev);
-	tasklet_init(&dev->done_task, sahara_aes_done_task,
-		     (unsigned long)dev);
+	dev->kthread = kthread_run(sahara_queue_manage, dev, "sahara_crypto");
+	if (IS_ERR(dev->kthread)) {
+		err = PTR_ERR(dev->kthread);
+		goto err_link;
+	}
 
-	init_timer(&dev->watchdog);
-	dev->watchdog.function = &sahara_watchdog;
-	dev->watchdog.data = (unsigned long)dev;
+	init_completion(&dev->dma_completion);
 
 	clk_prepare_enable(dev->clk_ipg);
 	clk_prepare_enable(dev->clk_ahb);
 
 	version = sahara_read(dev, SAHARA_REG_VERSION);
-	if (version != SAHARA_VERSION_3) {
+	if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx27-sahara")) {
+		if (version != SAHARA_VERSION_3)
+			err = -ENODEV;
+	} else if (of_device_is_compatible(pdev->dev.of_node,
+			"fsl,imx53-sahara")) {
+		if (((version >> 8) & 0xff) != SAHARA_VERSION_4)
+			err = -ENODEV;
+		version = (version >> 8) & 0xff;
+	}
+	if (err == -ENODEV) {
 		dev_err(&pdev->dev, "SAHARA version %d not supported\n",
-			version);
-		err = -ENODEV;
+				version);
 		goto err_algs;
 	}
 
+	dev->version = version;
+
 	sahara_write(dev, SAHARA_CMD_RESET | SAHARA_CMD_MODE_BATCH,
 		     SAHARA_REG_CMD);
 	sahara_write(dev, SAHARA_CONTROL_SET_THROTTLE(0) |
@@ -1000,11 +1608,15 @@ static int sahara_probe(struct platform_device *pdev)
 			  dev->hw_link[0], dev->hw_phys_link[0]);
 	clk_disable_unprepare(dev->clk_ipg);
 	clk_disable_unprepare(dev->clk_ahb);
+	kthread_stop(dev->kthread);
 	dev_ptr = NULL;
 err_link:
 	dma_free_coherent(&pdev->dev,
 			  2 * AES_KEYSIZE_128,
 			  dev->key_base, dev->key_phys_base);
+	dma_free_coherent(&pdev->dev,
+			  SHA256_DIGEST_SIZE + 4,
+			  dev->context_base, dev->context_phys_base);
 err_key:
 	dma_free_coherent(&pdev->dev,
 			  SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
@@ -1027,8 +1639,7 @@ static int sahara_remove(struct platform_device *pdev)
 			  SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
 			  dev->hw_desc[0], dev->hw_phys_desc[0]);
 
-	tasklet_kill(&dev->done_task);
-	tasklet_kill(&dev->queue_task);
+	kthread_stop(dev->kthread);
 
 	sahara_unregister_algs(dev);
 
@@ -1055,4 +1666,5 @@ module_platform_driver(sahara_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>");
+MODULE_AUTHOR("Steffen Trumtrar <s.trumtrar@pengutronix.de>");
 MODULE_DESCRIPTION("SAHARA2 HW crypto accelerator");
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index 92105f3dc8e09c9f94ef2b4efcb839ec051e2c42..7c035de9055e51548003dbb6c8770ce833e3f492 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -1688,6 +1688,7 @@ static void ux500_cryp_shutdown(struct platform_device *pdev)
 
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int ux500_cryp_suspend(struct device *dev)
 {
 	int ret;
@@ -1768,6 +1769,7 @@ static int ux500_cryp_resume(struct device *dev)
 
 	return ret;
 }
+#endif
 
 static SIMPLE_DEV_PM_OPS(ux500_cryp_pm, ux500_cryp_suspend, ux500_cryp_resume);
 
@@ -1810,7 +1812,7 @@ module_exit(ux500_cryp_mod_fini);
 module_param(cryp_mode, int, 0);
 
 MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 CRYP crypto engine.");
-MODULE_ALIAS("aes-all");
-MODULE_ALIAS("des-all");
+MODULE_ALIAS_CRYPTO("aes-all");
+MODULE_ALIAS_CRYPTO("des-all");
 
 MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
index 1c73f4fbc2526027f96b2323b21b0c4c0c1e1b0f..76ecc8d143d0f674c29618da5a86df788671845d 100644
--- a/drivers/crypto/ux500/hash/hash_core.c
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -1881,6 +1881,7 @@ static void ux500_hash_shutdown(struct platform_device *pdev)
 			__func__);
 }
 
+#ifdef CONFIG_PM_SLEEP
 /**
  * ux500_hash_suspend - Function that suspends the hash device.
  * @dev:	Device to suspend.
@@ -1949,6 +1950,7 @@ static int ux500_hash_resume(struct device *dev)
 
 	return ret;
 }
+#endif
 
 static SIMPLE_DEV_PM_OPS(ux500_hash_pm, ux500_hash_suspend, ux500_hash_resume);
 
@@ -1995,7 +1997,7 @@ module_exit(ux500_hash_mod_fini);
 MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 HASH engine.");
 MODULE_LICENSE("GPL");
 
-MODULE_ALIAS("sha1-all");
-MODULE_ALIAS("sha256-all");
-MODULE_ALIAS("hmac-sha1-all");
-MODULE_ALIAS("hmac-sha256-all");
+MODULE_ALIAS_CRYPTO("sha1-all");
+MODULE_ALIAS_CRYPTO("sha256-all");
+MODULE_ALIAS_CRYPTO("hmac-sha1-all");
+MODULE_ALIAS_CRYPTO("hmac-sha256-all");
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 99485415dcc2502f540490df5d443d3f92183563..91e97ec0141892cbf4d1676480d5fda3223b0e6b 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -44,6 +44,7 @@
 #include <linux/hrtimer.h>
 #include <linux/ktime.h>
 #include <asm/facility.h>
+#include <linux/crypto.h>
 
 #include "ap_bus.h"
 
@@ -71,7 +72,7 @@ MODULE_AUTHOR("IBM Corporation");
 MODULE_DESCRIPTION("Adjunct Processor Bus driver, " \
 		   "Copyright IBM Corp. 2006, 2012");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("z90crypt");
+MODULE_ALIAS_CRYPTO("z90crypt");
 
 /*
  * Module parameter
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index 74b13ec1ebd449caf644fde19ed17921af9e4649..98abda9ed3aa868996e640ae7d15cff05e20539a 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -17,6 +17,32 @@
 
 struct crypto_ahash;
 
+/**
+ * DOC: Message Digest Algorithm Definitions
+ *
+ * These data structures define modular message digest algorithm
+ * implementations, managed via crypto_register_ahash(),
+ * crypto_register_shash(), crypto_unregister_ahash() and
+ * crypto_unregister_shash().
+ */
+
+/**
+ * struct hash_alg_common - define properties of message digest
+ * @digestsize: Size of the result of the transformation. A buffer of this size
+ *	        must be available to the @final and @finup calls, so they can
+ *	        store the resulting hash into it. For various predefined sizes,
+ *	        search include/crypto/ using
+ *	        git grep _DIGEST_SIZE include/crypto.
+ * @statesize: Size of the block for partial state of the transformation. A
+ *	       buffer of this size must be passed to the @export function as it
+ *	       will save the partial state of the transformation into it. On the
+ *	       other side, the @import function will load the state from a
+ *	       buffer of this size as well.
+ * @base: Start of data structure of cipher algorithm. The common data
+ *	  structure of crypto_alg contains information common to all ciphers.
+ *	  The hash_alg_common data structure now adds the hash-specific
+ *	  information.
+ */
 struct hash_alg_common {
 	unsigned int digestsize;
 	unsigned int statesize;
@@ -37,6 +63,63 @@ struct ahash_request {
 	void *__ctx[] CRYPTO_MINALIGN_ATTR;
 };
 
+/**
+ * struct ahash_alg - asynchronous message digest definition
+ * @init: Initialize the transformation context. Intended only to initialize the
+ *	  state of the HASH transformation at the beginning. This shall fill in
+ *	  the internal structures used during the entire duration of the
+ *	  transformation. No data processing happens at this point.
+ * @update: Push a chunk of data into the driver for transformation. This
+ *	   function actually pushes blocks of data from upper layers into the
+ *	   driver, which then passes those to the hardware as seen fit. This
+ *	   function must not finalize the HASH transformation by calculating the
+ *	   final message digest as this only adds more data into the
+ *	   transformation. This function shall not modify the transformation
+ *	   context, as this function may be called in parallel with the same
+ *	   transformation object. Data processing can happen synchronously
+ *	   [SHASH] or asynchronously [AHASH] at this point.
+ * @final: Retrieve result from the driver. This function finalizes the
+ *	   transformation and retrieves the resulting hash from the driver and
+ *	   pushes it back to upper layers. No data processing happens at this
+ *	   point.
+ * @finup: Combination of @update and @final. This function is effectively a
+ *	   combination of @update and @final calls issued in sequence. As some
+ *	   hardware cannot do @update and @final separately, this callback was
+ *	   added to allow such hardware to be used at least by IPsec. Data
+ *	   processing can happen synchronously [SHASH] or asynchronously [AHASH]
+ *	   at this point.
+ * @digest: Combination of @init and @update and @final. This function
+ *	    effectively behaves as the entire chain of operations, @init,
+ *	    @update and @final issued in sequence. Just like @finup, this was
+ *	    added for hardware which cannot do even the @finup, but can only do
+ *	    the whole transformation in one run. Data processing can happen
+ *	    synchronously [SHASH] or asynchronously [AHASH] at this point.
+ * @setkey: Set optional key used by the hashing algorithm. Intended to push
+ *	    optional key used by the hashing algorithm from upper layers into
+ *	    the driver. This function can store the key in the transformation
+ *	    context or can outright program it into the hardware. In the former
+ *	    case, one must be careful to program the key into the hardware at
+ *	    appropriate time and one must be careful that .setkey() can be
+ *	    called multiple times during the existence of the transformation
+ *	    object. Not all hashing algorithms implement this function, as it
+ *	    is only needed for keyed message digests. SHAx/MDx/CRCx do NOT
+ *	    implement this function. HMAC(MDx)/HMAC(SHAx)/CMAC(AES) do implement
+ *	    this function. This function must be called before any other of the
+ *	    @init, @update, @final, @finup, @digest is called. No data
+ *	    processing happens at this point.
+ * @export: Export partial state of the transformation. This function dumps the
+ *	    entire state of the ongoing transformation into a provided block of
+ *	    data so it can be @import 'ed back later on. This is useful in case
+ *	    you want to save partial result of the transformation after
+ *	    data so it can be @import 'ed back later on. This is useful when
+ *	    you want to save the partial result of the transformation after
+ *	    processing a certain amount of data and reload this partial result
+ *	    later on for re-use. No data processing
+ *	    entire state of the ongoing transformation from a provided block of
+ *	    data so the transformation can continue from this point onward. No
+ *	    data processing happens at this point.
+ * @halg: see struct hash_alg_common
+ */
 struct ahash_alg {
 	int (*init)(struct ahash_request *req);
 	int (*update)(struct ahash_request *req);
@@ -63,6 +146,23 @@ struct shash_desc {
 		crypto_shash_descsize(ctx)] CRYPTO_MINALIGN_ATTR; \
 	struct shash_desc *shash = (struct shash_desc *)__##shash##_desc
 
+/**
+ * struct shash_alg - synchronous message digest definition
+ * @init: see struct ahash_alg
+ * @update: see struct ahash_alg
+ * @final: see struct ahash_alg
+ * @finup: see struct ahash_alg
+ * @digest: see struct ahash_alg
+ * @export: see struct ahash_alg
+ * @import: see struct ahash_alg
+ * @setkey: see struct ahash_alg
+ * @digestsize: see struct ahash_alg
+ * @statesize: see struct ahash_alg
+ * @descsize: Size of the operational state for the message digest. This state
+ * 	      size is the memory size that needs to be allocated for
+ *	      shash_desc.__ctx
+ * @base: internally used
+ */
 struct shash_alg {
 	int (*init)(struct shash_desc *desc);
 	int (*update)(struct shash_desc *desc, const u8 *data,
@@ -107,11 +207,35 @@ struct crypto_shash {
 	struct crypto_tfm base;
 };
 
+/**
+ * DOC: Asynchronous Message Digest API
+ *
+ * The asynchronous message digest API is used with the ciphers of type
+ * CRYPTO_ALG_TYPE_AHASH (listed as type "ahash" in /proc/crypto)
+ *
+ * The asynchronous cipher operation discussion provided for the
+ * CRYPTO_ALG_TYPE_ABLKCIPHER API applies here as well.
+ */
+
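As an illustrative aside to the documentation above (not part of the patch): a minimal one-shot user of the asynchronous hash API might look as follows. The function name and buffers are hypothetical, data/out must be DMA-able (not on the stack), and a caller of a truly asynchronous implementation would register a callback and wait for the -EINPROGRESS/-EBUSY completion instead of treating the return code as final.

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Hypothetical helper: one-shot SHA-256 over a linear, non-stack buffer. */
static int example_sha256_digest(const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	int ret;

	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	sg_init_one(&sg, data, len);
	ahash_request_set_callback(req, 0, NULL, NULL);
	ahash_request_set_crypt(req, &sg, out, len);

	/* For an async tfm this may return -EINPROGRESS; a real caller would
	 * use a completion in the callback rather than returning here. */
	ret = crypto_ahash_digest(req);

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return ret;
}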
 static inline struct crypto_ahash *__crypto_ahash_cast(struct crypto_tfm *tfm)
 {
 	return container_of(tfm, struct crypto_ahash, base);
 }
 
+/**
+ * crypto_alloc_ahash() - allocate ahash cipher handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ *	      ahash cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Allocate a cipher handle for an ahash. The returned struct
+ * crypto_ahash is the cipher handle that is required for any subsequent
+ * API invocation for that ahash.
+ *
+ * Return: allocated cipher handle in case of success; IS_ERR() is true in case
+ *	   of an error, PTR_ERR() returns the error code.
+ */
 struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
 					u32 mask);
 
@@ -120,6 +244,10 @@ static inline struct crypto_tfm *crypto_ahash_tfm(struct crypto_ahash *tfm)
 	return &tfm->base;
 }
 
+/**
+ * crypto_free_ahash() - zeroize and free the ahash handle
+ * @tfm: cipher handle to be freed
+ */
 static inline void crypto_free_ahash(struct crypto_ahash *tfm)
 {
 	crypto_destroy_tfm(tfm, crypto_ahash_tfm(tfm));
@@ -143,6 +271,16 @@ static inline struct hash_alg_common *crypto_hash_alg_common(
 	return __crypto_hash_alg_common(crypto_ahash_tfm(tfm)->__crt_alg);
 }
 
+/**
+ * crypto_ahash_digestsize() - obtain message digest size
+ * @tfm: cipher handle
+ *
+ * The size for the message digest created by the message digest cipher
+ * referenced with the cipher handle is returned.
+ *
+ *
+ * Return: message digest size of cipher
+ */
 static inline unsigned int crypto_ahash_digestsize(struct crypto_ahash *tfm)
 {
 	return crypto_hash_alg_common(tfm)->digestsize;
@@ -168,12 +306,32 @@ static inline void crypto_ahash_clear_flags(struct crypto_ahash *tfm, u32 flags)
 	crypto_tfm_clear_flags(crypto_ahash_tfm(tfm), flags);
 }
 
+/**
+ * crypto_ahash_reqtfm() - obtain cipher handle from request
+ * @req: asynchronous request handle that contains the reference to the ahash
+ *	 cipher handle
+ *
+ * Return the ahash cipher handle that is registered with the asynchronous
+ * request handle ahash_request.
+ *
+ * Return: ahash cipher handle
+ */
 static inline struct crypto_ahash *crypto_ahash_reqtfm(
 	struct ahash_request *req)
 {
 	return __crypto_ahash_cast(req->base.tfm);
 }
 
+/**
+ * crypto_ahash_reqsize() - obtain size of the request data structure
+ * @tfm: cipher handle
+ *
+ * Return the size of the ahash state. With the crypto_ahash_export
+ * function, the caller can export the state into a buffer whose size is
+ * defined with this function.
+ *
+ * Return: size of the ahash state
+ */
 static inline unsigned int crypto_ahash_reqsize(struct crypto_ahash *tfm)
 {
 	return tfm->reqsize;
@@ -184,38 +342,166 @@ static inline void *ahash_request_ctx(struct ahash_request *req)
 	return req->__ctx;
 }
 
+/**
+ * crypto_ahash_setkey - set key for cipher handle
+ * @tfm: cipher handle
+ * @key: buffer holding the key
+ * @keylen: length of the key in bytes
+ *
+ * The caller provided key is set for the ahash cipher. The cipher
+ * handle must point to a keyed hash in order for this function to succeed.
+ *
+ * Return: 0 if the setting of the key was successful; < 0 if an error occurred
+ */
 int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
 			unsigned int keylen);
+
+/**
+ * crypto_ahash_finup() - update and finalize message digest
+ * @req: reference to the ahash_request handle that holds all information
+ *	 needed to perform the cipher operation
+ *
+ * This function is a "short-hand" for the function calls of
+ * crypto_ahash_update and crypto_ahash_final. The parameters have the same
+ * meaning as discussed for those separate functions.
+ *
+ * Return: 0 if the message digest creation was successful; < 0 if an error
+ *	   occurred
+ */
 int crypto_ahash_finup(struct ahash_request *req);
+
+/**
+ * crypto_ahash_final() - calculate message digest
+ * @req: reference to the ahash_request handle that holds all information
+ *	 needed to perform the cipher operation
+ *
+ * Finalize the message digest operation and create the message digest
+ * based on all data added to the cipher handle. The message digest is placed
+ * into the output buffer registered with the ahash_request handle.
+ *
+ * Return: 0 if the message digest creation was successful; < 0 if an error
+ *	   occurred
+ */
 int crypto_ahash_final(struct ahash_request *req);
+
+/**
+ * crypto_ahash_digest() - calculate message digest for a buffer
+ * @req: reference to the ahash_request handle that holds all information
+ *	 needed to perform the cipher operation
+ *
+ * This function is a "short-hand" for the function calls of crypto_ahash_init,
+ * crypto_ahash_update and crypto_ahash_final. The parameters have the same
+ * meaning as discussed for those three separate functions.
+ *
+ * Return: 0 if the message digest creation was successful; < 0 if an error
+ *	   occurred
+ */
 int crypto_ahash_digest(struct ahash_request *req);
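+
+/*
+ * A minimal usage sketch for crypto_ahash_digest: hash one linear buffer with
+ * an asynchronous message digest. The algorithm name "sha256", the GFP flags
+ * and the abbreviated error handling are illustrative assumptions; @out must
+ * hold crypto_ahash_digestsize() bytes, and <crypto/hash.h>,
+ * <linux/scatterlist.h> and <linux/completion.h> are assumed to be included.
+ *
+ *	static void example_ahash_done(struct crypto_async_request *req, int err)
+ *	{
+ *		if (err != -EINPROGRESS)
+ *			complete(req->data);
+ *	}
+ *
+ *	static int example_ahash_sha256(const u8 *buf, unsigned int len, u8 *out)
+ *	{
+ *		struct crypto_ahash *tfm;
+ *		struct ahash_request *req;
+ *		struct scatterlist sg;
+ *		DECLARE_COMPLETION_ONSTACK(done);
+ *		int ret;
+ *
+ *		tfm = crypto_alloc_ahash("sha256", 0, 0);
+ *		if (IS_ERR(tfm))
+ *			return PTR_ERR(tfm);
+ *
+ *		req = ahash_request_alloc(tfm, GFP_KERNEL);
+ *		if (!req) {
+ *			crypto_free_ahash(tfm);
+ *			return -ENOMEM;
+ *		}
+ *
+ *		sg_init_one(&sg, buf, len);
+ *		ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ *					   example_ahash_done, &done);
+ *		ahash_request_set_crypt(req, &sg, out, len);
+ *
+ *		ret = crypto_ahash_digest(req);
+ *		if (ret == -EINPROGRESS || ret == -EBUSY) {
+ *			wait_for_completion(&done);
+ *			ret = 0;
+ *		}
+ *
+ *		ahash_request_free(req);
+ *		crypto_free_ahash(tfm);
+ *		return ret;
+ *	}
+ */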
 
+/**
+ * crypto_ahash_export() - extract current message digest state
+ * @req: reference to the ahash_request handle whose state is exported
+ * @out: output buffer of sufficient size that can hold the hash state
+ *
+ * This function exports the hash state of the ahash_request handle into the
+ * caller-allocated output buffer @out, which must be of sufficient size (as
+ * can be determined, for example, by calling crypto_ahash_reqsize).
+ *
+ * Return: 0 if the export was successful; < 0 if an error occurred
+ */
 static inline int crypto_ahash_export(struct ahash_request *req, void *out)
 {
 	return crypto_ahash_reqtfm(req)->export(req, out);
 }
 
+/**
+ * crypto_ahash_import() - import message digest state
+ * @req: reference to ahash_request handle the state is imported into
+ * @in: buffer holding the state
+ *
+ * This function imports the hash state into the ahash_request handle from the
+ * input buffer. That buffer should have been generated with the
+ * crypto_ahash_export function.
+ *
+ * Return: 0 if the import was successful; < 0 if an error occurred
+ */
 static inline int crypto_ahash_import(struct ahash_request *req, const void *in)
 {
 	return crypto_ahash_reqtfm(req)->import(req, in);
 }
 
+/**
+ * crypto_ahash_init() - (re)initialize message digest handle
+ * @req: ahash_request handle that already is initialized with all necessary
+ *	 data using the ahash_request_* API functions
+ *
+ * The call (re-)initializes the message digest referenced by the ahash_request
+ * handle. Any potentially existing state created by previous operations is
+ * discarded.
+ *
+ * Return: 0 if the message digest initialization was successful; < 0 if an
+ *	   error occurred
+ */
 static inline int crypto_ahash_init(struct ahash_request *req)
 {
 	return crypto_ahash_reqtfm(req)->init(req);
 }
 
+/**
+ * crypto_ahash_update() - add data to message digest for processing
+ * @req: ahash_request handle that was previously initialized with the
+ *	 crypto_ahash_init call.
+ *
+ * Updates the message digest state of the &ahash_request handle. The input data
+ * is pointed to by the scatter/gather list registered in the &ahash_request
+ * handle.
+ *
+ * Return: 0 if the message digest update was successful; < 0 if an error
+ *	   occurred
+ */
 static inline int crypto_ahash_update(struct ahash_request *req)
 {
 	return crypto_ahash_reqtfm(req)->update(req);
 }
 
+/**
+ * DOC: Asynchronous Hash Request Handle
+ *
+ * The &ahash_request data structure contains all pointers to data
+ * required for the asynchronous cipher operation. This includes the cipher
+ * handle (which can be used by multiple &ahash_request instances), pointer
+ * to plaintext and the message digest output buffer, asynchronous callback
+ * function, etc. It acts as a handle to the ahash_request_* API calls in a
+ * similar way as ahash handle to the crypto_ahash_* API calls.
+ */
+
+/**
+ * ahash_request_set_tfm() - update cipher handle reference in request
+ * @req: request handle to be modified
+ * @tfm: cipher handle that shall be added to the request handle
+ *
+ * Allow the caller to replace the existing ahash handle in the request
+ * data structure with a different one.
+ */
 static inline void ahash_request_set_tfm(struct ahash_request *req,
 					 struct crypto_ahash *tfm)
 {
 	req->base.tfm = crypto_ahash_tfm(tfm);
 }
 
+/**
+ * ahash_request_alloc() - allocate request data structure
+ * @tfm: cipher handle to be registered with the request
+ * @gfp: memory allocation flag that is handed to kmalloc by the API call.
+ *
+ * Allocate the request data structure that must be used with the ahash
+ * message digest API calls. During the allocation, the provided ahash handle
+ * is registered in the request data structure.
+ *
+ * Return: allocated request handle in case of success, or NULL if out of memory
+ */
 static inline struct ahash_request *ahash_request_alloc(
 	struct crypto_ahash *tfm, gfp_t gfp)
 {
@@ -230,6 +516,10 @@ static inline struct ahash_request *ahash_request_alloc(
 	return req;
 }
 
+/**
+ * ahash_request_free() - zeroize and free the request data structure
+ * @req: request data structure cipher handle to be freed
+ */
 static inline void ahash_request_free(struct ahash_request *req)
 {
 	kzfree(req);
@@ -241,6 +531,31 @@ static inline struct ahash_request *ahash_request_cast(
 	return container_of(req, struct ahash_request, base);
 }
 
+/**
+ * ahash_request_set_callback() - set asynchronous callback function
+ * @req: request handle
+ * @flags: specify zero or an ORing of the flags
+ *	   CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
+ *	   increase the wait queue beyond the initial maximum size;
+ *	   CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
+ * @compl: callback function pointer to be registered with the request handle
+ * @data: The data pointer refers to memory that is not used by the kernel
+ *	  crypto API, but provided to the callback function for it to use. Here,
+ *	  the caller can provide a reference to memory the callback function can
+ *	  operate on. As the callback function is invoked asynchronously to the
+ *	  related functionality, it may need to access data structures of the
+ *	  related functionality which can be referenced using this pointer. The
+ *	  callback function can access the memory via the "data" field in the
+ *	  &crypto_async_request data structure provided to the callback function.
+ *
+ * This function allows setting the callback function that is triggered once
+ * the cipher operation completes.
+ *
+ * The callback function is registered with the &ahash_request handle and
+ * must comply with the following template
+ *
+ *	void callback_function(struct crypto_async_request *req, int error)
+ */
 static inline void ahash_request_set_callback(struct ahash_request *req,
 					      u32 flags,
 					      crypto_completion_t compl,
@@ -251,6 +566,19 @@ static inline void ahash_request_set_callback(struct ahash_request *req,
 	req->base.flags = flags;
 }
 
+/**
+ * ahash_request_set_crypt() - set data buffers
+ * @req: ahash_request handle to be updated
+ * @src: source scatter/gather list
+ * @result: buffer that is filled with the message digest -- the caller must
+ *	    ensure that the buffer has sufficient space by, for example, calling
+ *	    crypto_ahash_digestsize()
+ * @nbytes: number of bytes to process from the source scatter/gather list
+ *
+ * By using this call, the caller references the source scatter/gather list.
+ * The source scatter/gather list points to the data the message digest is to
+ * be calculated for.
+ */
 static inline void ahash_request_set_crypt(struct ahash_request *req,
 					   struct scatterlist *src, u8 *result,
 					   unsigned int nbytes)
@@ -260,6 +588,33 @@ static inline void ahash_request_set_crypt(struct ahash_request *req,
 	req->result = result;
 }
 
+/**
+ * DOC: Synchronous Message Digest API
+ *
+ * The synchronous message digest API is used with the ciphers of type
+ * CRYPTO_ALG_TYPE_SHASH (listed as type "shash" in /proc/crypto)
+ *
+ * The message digest API is able to maintain state information for the
+ * caller.
+ *
+ * The synchronous message digest API can store user-related context in its
+ * shash_desc request data structure.
+ */
+
+/**
+ * crypto_alloc_shash() - allocate message digest handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ *	      message digest cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Allocate a cipher handle for a message digest. The returned &struct
+ * crypto_shash is the cipher handle that is required for any subsequent
+ * API invocation for that message digest.
+ *
+ * Return: allocated cipher handle in case of success; IS_ERR() is true in case
+ *	   of an error, PTR_ERR() returns the error code.
+ */
 struct crypto_shash *crypto_alloc_shash(const char *alg_name, u32 type,
 					u32 mask);
 
@@ -268,6 +623,10 @@ static inline struct crypto_tfm *crypto_shash_tfm(struct crypto_shash *tfm)
 	return &tfm->base;
 }
 
+/**
+ * crypto_free_shash() - zeroize and free the message digest handle
+ * @tfm: cipher handle to be freed
+ */
 static inline void crypto_free_shash(struct crypto_shash *tfm)
 {
 	crypto_destroy_tfm(tfm, crypto_shash_tfm(tfm));
@@ -279,6 +638,15 @@ static inline unsigned int crypto_shash_alignmask(
 	return crypto_tfm_alg_alignmask(crypto_shash_tfm(tfm));
 }
 
+/**
+ * crypto_shash_blocksize() - obtain block size for cipher
+ * @tfm: cipher handle
+ *
+ * The block size for the message digest cipher referenced with the cipher
+ * handle is returned.
+ *
+ * Return: block size of cipher
+ */
 static inline unsigned int crypto_shash_blocksize(struct crypto_shash *tfm)
 {
 	return crypto_tfm_alg_blocksize(crypto_shash_tfm(tfm));
@@ -294,6 +662,15 @@ static inline struct shash_alg *crypto_shash_alg(struct crypto_shash *tfm)
 	return __crypto_shash_alg(crypto_shash_tfm(tfm)->__crt_alg);
 }
 
+/**
+ * crypto_shash_digestsize() - obtain message digest size
+ * @tfm: cipher handle
+ *
+ * The size for the message digest created by the message digest cipher
+ * referenced with the cipher handle is returned.
+ *
+ * Return: digest size of cipher
+ */
 static inline unsigned int crypto_shash_digestsize(struct crypto_shash *tfm)
 {
 	return crypto_shash_alg(tfm)->digestsize;
@@ -319,6 +696,21 @@ static inline void crypto_shash_clear_flags(struct crypto_shash *tfm, u32 flags)
 	crypto_tfm_clear_flags(crypto_shash_tfm(tfm), flags);
 }
 
+/**
+ * crypto_shash_descsize() - obtain the operational state size
+ * @tfm: cipher handle
+ *
+ * The size of the operational state the cipher needs during operation is
+ * returned for the hash referenced with the cipher handle. This size is
+ * required to calculate the memory requirements so that the caller can allocate
+ * sufficient memory for the operational state.
+ *
+ * The operational state is defined with struct shash_desc where the size of
+ * that data structure is to be calculated as
+ * sizeof(struct shash_desc) + crypto_shash_descsize(alg)
+ *
+ * Return: size of the operational state
+ */
 static inline unsigned int crypto_shash_descsize(struct crypto_shash *tfm)
 {
 	return tfm->descsize;
@@ -329,29 +721,129 @@ static inline void *shash_desc_ctx(struct shash_desc *desc)
 	return desc->__ctx;
 }
 
+/**
+ * crypto_shash_setkey() - set key for message digest
+ * @tfm: cipher handle
+ * @key: buffer holding the key
+ * @keylen: length of the key in bytes
+ *
+ * The caller provided key is set for the keyed message digest cipher. The
+ * cipher handle must point to a keyed message digest cipher in order for this
+ * function to succeed.
+ *
+ * Return: 0 if the setting of the key was successful; < 0 if an error occurred
+ */
 int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
 			unsigned int keylen);
+
+/**
+ * crypto_shash_digest() - calculate message digest for buffer
+ * @desc: see crypto_shash_final()
+ * @data: see crypto_shash_update()
+ * @len: see crypto_shash_update()
+ * @out: see crypto_shash_final()
+ *
+ * This function is a "short-hand" for the function calls of crypto_shash_init,
+ * crypto_shash_update and crypto_shash_final. The parameters have the same
+ * meaning as discussed for those three separate functions.
+ *
+ * Return: 0 if the message digest creation was successful; < 0 if an error
+ *	   occurred
+ */
 int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
 			unsigned int len, u8 *out);
 
+/**
+ * crypto_shash_export() - extract operational state for message digest
+ * @desc: reference to the operational state handle whose state is exported
+ * @out: output buffer of sufficient size that can hold the hash state
+ *
+ * This function exports the hash state of the operational state handle into the
+ * caller-allocated output buffer @out, which must be of sufficient size (as
+ * can be determined, for example, by calling crypto_shash_descsize).
+ *
+ * Return: 0 if the export creation was successful; < 0 if an error occurred
+ */
 static inline int crypto_shash_export(struct shash_desc *desc, void *out)
 {
 	return crypto_shash_alg(desc->tfm)->export(desc, out);
 }
 
+/**
+ * crypto_shash_import() - import operational state
+ * @desc: reference to the operational state handle the state is imported into
+ * @in: buffer holding the state
+ *
+ * This function imports the hash state into the operational state handle from
+ * the input buffer. That buffer should have been generated with the
+ * crypto_shash_export function.
+ *
+ * Return: 0 if the import was successful; < 0 if an error occurred
+ */
 static inline int crypto_shash_import(struct shash_desc *desc, const void *in)
 {
 	return crypto_shash_alg(desc->tfm)->import(desc, in);
 }
 
+/**
+ * crypto_shash_init() - (re)initialize message digest
+ * @desc: operational state handle that is already filled
+ *
+ * The call (re-)initializes the message digest referenced by the
+ * operational state handle. Any potentially existing state created by
+ * previous operations is discarded.
+ *
+ * Return: 0 if the message digest initialization was successful; < 0 if an
+ *	   error occurred
+ */
 static inline int crypto_shash_init(struct shash_desc *desc)
 {
 	return crypto_shash_alg(desc->tfm)->init(desc);
 }
 
+/**
+ * crypto_shash_update() - add data to message digest for processing
+ * @desc: operational state handle that is already initialized
+ * @data: input data to be added to the message digest
+ * @len: length of the input data
+ *
+ * Updates the message digest state of the operational state handle.
+ *
+ * Return: 0 if the message digest update was successful; < 0 if an error
+ *	   occurred
+ */
 int crypto_shash_update(struct shash_desc *desc, const u8 *data,
 			unsigned int len);
+
+/**
+ * crypto_shash_final() - calculate message digest
+ * @desc: operational state handle that is already filled with data
+ * @out: output buffer filled with the message digest
+ *
+ * Finalize the message digest operation and create the message digest
+ * based on all data added to the cipher handle. The message digest is placed
+ * into the output buffer. The caller must ensure that the output buffer is
+ * large enough by using crypto_shash_digestsize.
+ *
+ * Return: 0 if the message digest creation was successful; < 0 if an error
+ *	   occurred
+ */
 int crypto_shash_final(struct shash_desc *desc, u8 *out);
+
+/**
+ * crypto_shash_finup() - calculate message digest of buffer
+ * @desc: see crypto_shash_final()
+ * @data: see crypto_shash_update()
+ * @len: see crypto_shash_update()
+ * @out: see crypto_shash_final()
+ *
+ * This function is a "short-hand" for the function calls of
+ * crypto_shash_update and crypto_shash_final. The parameters have the same
+ * meaning as discussed for those separate functions.
+ *
+ * Return: 0 if the message digest creation was successful; < 0 if an error
+ *	   occurred
+ */
 int crypto_shash_finup(struct shash_desc *desc, const u8 *data,
 		       unsigned int len, u8 *out);
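+
+/*
+ * A minimal usage sketch for the synchronous hash API: a one-shot digest over
+ * a linear buffer. The algorithm name "sha256" and the heap-allocated
+ * descriptor sized via crypto_shash_descsize() are illustrative assumptions;
+ * @out must hold crypto_shash_digestsize() bytes, and <crypto/hash.h> plus
+ * <linux/slab.h> are assumed to be included.
+ *
+ *	static int example_shash_sha256(const u8 *buf, unsigned int len, u8 *out)
+ *	{
+ *		struct crypto_shash *tfm;
+ *		struct shash_desc *desc;
+ *		int ret;
+ *
+ *		tfm = crypto_alloc_shash("sha256", 0, 0);
+ *		if (IS_ERR(tfm))
+ *			return PTR_ERR(tfm);
+ *
+ *		desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm),
+ *			       GFP_KERNEL);
+ *		if (!desc) {
+ *			crypto_free_shash(tfm);
+ *			return -ENOMEM;
+ *		}
+ *		desc->tfm = tfm;
+ *		desc->flags = 0;
+ *
+ *		ret = crypto_shash_digest(desc, buf, len, out);
+ *
+ *		kzfree(desc);
+ *		crypto_free_shash(tfm);
+ *		return ret;
+ *	}
+ *
+ * For a keyed digest such as "hmac(sha256)", crypto_shash_setkey() must be
+ * called on the handle before the digest is calculated.
+ */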
 
diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
index d61c11170213f22dd95467959aed5596b1edc5a5..cd62bf4289e9573301b25c5559ac91b412883dc6 100644
--- a/include/crypto/if_alg.h
+++ b/include/crypto/if_alg.h
@@ -42,6 +42,7 @@ struct af_alg_completion {
 struct af_alg_control {
 	struct af_alg_iv *iv;
 	int op;
+	unsigned int aead_assoclen;
 };
 
 struct af_alg_type {
diff --git a/include/crypto/rng.h b/include/crypto/rng.h
index c93f9b917925c3f0c28ea4e9e156e2a22767d72e..a16fb10142bf5bf7efe9210845ee75c0b4267744 100644
--- a/include/crypto/rng.h
+++ b/include/crypto/rng.h
@@ -20,11 +20,38 @@ extern struct crypto_rng *crypto_default_rng;
 int crypto_get_default_rng(void);
 void crypto_put_default_rng(void);
 
+/**
+ * DOC: Random number generator API
+ *
+ * The random number generator API is used with the ciphers of type
+ * CRYPTO_ALG_TYPE_RNG (listed as type "rng" in /proc/crypto)
+ */
+
 static inline struct crypto_rng *__crypto_rng_cast(struct crypto_tfm *tfm)
 {
 	return (struct crypto_rng *)tfm;
 }
 
+/**
+ * crypto_alloc_rng() - allocate RNG handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ *	      random number generator
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Allocate a cipher handle for a random number generator. The returned struct
+ * crypto_rng is the cipher handle that is required for any subsequent
+ * API invocation for that random number generator.
+ *
+ * For all random number generators, this call creates a new private copy of
+ * the random number generator that does not share a state with other
+ * instances. The only exception is the "krng" random number generator which
+ * is a kernel crypto API use case for the get_random_bytes() function of the
+ * /dev/random driver.
+ *
+ * Return: allocated cipher handle in case of success; IS_ERR() is true in case
+ *	   of an error, PTR_ERR() returns the error code.
+ */
 static inline struct crypto_rng *crypto_alloc_rng(const char *alg_name,
 						  u32 type, u32 mask)
 {
@@ -40,6 +67,14 @@ static inline struct crypto_tfm *crypto_rng_tfm(struct crypto_rng *tfm)
 	return &tfm->base;
 }
 
+/**
+ * crypto_rng_alg() - obtain the RNG algorithm definition
+ * @tfm: cipher handle
+ *
+ * Return the rng_alg structure that is registered with the random number
+ * generator referenced by the cipher handle.
+ *
+ * Return: pointer to the rng_alg data structure
+ */
 static inline struct rng_alg *crypto_rng_alg(struct crypto_rng *tfm)
 {
 	return &crypto_rng_tfm(tfm)->__crt_alg->cra_rng;
@@ -50,23 +85,68 @@ static inline struct rng_tfm *crypto_rng_crt(struct crypto_rng *tfm)
 	return &crypto_rng_tfm(tfm)->crt_rng;
 }
 
+/**
+ * crypto_free_rng() - zeroize and free RNG handle
+ * @tfm: cipher handle to be freed
+ */
 static inline void crypto_free_rng(struct crypto_rng *tfm)
 {
 	crypto_free_tfm(crypto_rng_tfm(tfm));
 }
 
+/**
+ * crypto_rng_get_bytes() - get random number
+ * @tfm: cipher handle
+ * @rdata: output buffer holding the random numbers
+ * @dlen: length of the output buffer
+ *
+ * This function fills the caller-allocated buffer with random numbers using the
+ * random number generator referenced by the cipher handle.
+ *
+ * Return: > 0 function was successful and returns the number of generated
+ *	   bytes; < 0 if an error occurred
+ */
 static inline int crypto_rng_get_bytes(struct crypto_rng *tfm,
 				       u8 *rdata, unsigned int dlen)
 {
 	return crypto_rng_crt(tfm)->rng_gen_random(tfm, rdata, dlen);
 }
 
+/**
+ * crypto_rng_reset() - re-initialize the RNG
+ * @tfm: cipher handle
+ * @seed: seed input data
+ * @slen: length of the seed input data
+ *
+ * The reset function completely re-initializes the random number generator
+ * referenced by the cipher handle by clearing the current state. The new state
+ * is initialized with the caller provided seed or automatically, depending
+ * on the random number generator type (the ANSI X9.31 RNG requires
+ * caller-provided seed, the SP800-90A DRBGs perform an automatic seeding).
+ * The seed is provided as a parameter to this function call. The provided seed
+ * should have the length of the seed size defined for the random number
+ * generator as defined by crypto_rng_seedsize.
+ *
+ * Return: 0 if the re-initialization was successful; < 0 if an error occurred
+ */
 static inline int crypto_rng_reset(struct crypto_rng *tfm,
 				   u8 *seed, unsigned int slen)
 {
 	return crypto_rng_crt(tfm)->rng_reset(tfm, seed, slen);
 }
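+
+/*
+ * A minimal usage sketch for the RNG API: reset the generator and obtain
+ * random bytes. The cipher name "stdrng" and the omission of a
+ * caller-provided seed (relying on automatic seeding) are illustrative
+ * assumptions; per crypto_rng_get_bytes(), a positive return value is the
+ * number of generated bytes.
+ *
+ *	static int example_get_random(u8 *buf, unsigned int len)
+ *	{
+ *		struct crypto_rng *rng;
+ *		int ret;
+ *
+ *		rng = crypto_alloc_rng("stdrng", 0, 0);
+ *		if (IS_ERR(rng))
+ *			return PTR_ERR(rng);
+ *
+ *		ret = crypto_rng_reset(rng, NULL, 0);
+ *		if (ret >= 0)
+ *			ret = crypto_rng_get_bytes(rng, buf, len);
+ *
+ *		crypto_free_rng(rng);
+ *		return ret;
+ *	}
+ */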
 
+/**
+ * crypto_rng_seedsize() - obtain seed size of RNG
+ * @tfm: cipher handle
+ *
+ * The function returns the seed size for the random number generator
+ * referenced by the cipher handle. This value may be zero if the random
+ * number generator does not implement or require a reseeding. For example,
+ * the SP800-90A DRBGs implement an automated reseeding after reaching a
+ * pre-defined threshold.
+ *
+ * Return: seed size for the random number generator
+ */
 static inline int crypto_rng_seedsize(struct crypto_rng *tfm)
 {
 	return crypto_rng_alg(tfm)->seedsize;
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index d45e949699ea26370efafa682d5c7c5c39c39108..9c8776d0ada87bcf9fa70bc401ce69e336b93d74 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -25,6 +25,19 @@
 #include <linux/string.h>
 #include <linux/uaccess.h>
 
+/*
+ * Autoloaded crypto modules should only use a prefixed name to avoid allowing
+ * arbitrary modules to be loaded. Loading from userspace may still need the
+ * unprefixed names, so those aliases are retained as well.
+ * This uses __MODULE_INFO directly instead of MODULE_ALIAS because pre-4.3
+ * gcc (e.g. avr32 toolchain) uses __LINE__ for uniqueness, and this macro
+ * expands twice on the same line. Instead, use a separate base name for the
+ * alias.
+ */
+#define MODULE_ALIAS_CRYPTO(name)	\
+		__MODULE_INFO(alias, alias_userspace, name);	\
+		__MODULE_INFO(alias, alias_crypto, "crypto-" name)
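+
+/*
+ * For instance, a module implementing the "aes" cipher (name chosen purely
+ * for illustration) would declare:
+ *
+ *	MODULE_ALIAS_CRYPTO("aes");
+ *
+ * making the module loadable via both the plain "aes" alias and the
+ * prefixed "crypto-aes" alias.
+ */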
+
 /*
  * Algorithm masks and types.
  */
@@ -127,6 +140,13 @@ struct skcipher_givcrypt_request;
 
 typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err);
 
+/**
+ * DOC: Block Cipher Context Data Structures
+ *
+ * These data structures define the operating context for each block cipher
+ * type.
+ */
+
 struct crypto_async_request {
 	struct list_head list;
 	crypto_completion_t complete;
@@ -194,9 +214,63 @@ struct hash_desc {
 	u32 flags;
 };
 
-/*
- * Algorithms: modular crypto algorithm implementations, managed
- * via crypto_register_alg() and crypto_unregister_alg().
+/**
+ * DOC: Block Cipher Algorithm Definitions
+ *
+ * These data structures define modular crypto algorithm implementations,
+ * managed via crypto_register_alg() and crypto_unregister_alg().
+ */
+
+/**
+ * struct ablkcipher_alg - asynchronous block cipher definition
+ * @min_keysize: Minimum key size supported by the transformation. This is the
+ *		 smallest key length supported by this transformation algorithm.
+ *		 This must be set to one of the pre-defined values as this is
+ *		 not hardware specific. Possible values for this field can be
+ *		 found via git grep "_MIN_KEY_SIZE" include/crypto/
+ * @max_keysize: Maximum key size supported by the transformation. This is the
+ *		 largest key length supported by this transformation algorithm.
+ *		 This must be set to one of the pre-defined values as this is
+ *		 not hardware specific. Possible values for this field can be
+ *		 found via git grep "_MAX_KEY_SIZE" include/crypto/
+ * @setkey: Set key for the transformation. This function is used to either
+ *	    program a supplied key into the hardware or store the key in the
+ *	    transformation context for programming it later. Note that this
+ *	    function does modify the transformation context. This function can
+ *	    be called multiple times during the existence of the transformation
+ *	    object, so one must make sure the key is properly reprogrammed into
+ *	    the hardware. This function is also responsible for checking the key
+ *	    length for validity. In case a software fallback was put in place in
+ *	    the @cra_init call, this function might need to use the fallback if
+ *	    the algorithm doesn't support all of the key sizes.
+ * @encrypt: Encrypt a scatterlist of blocks. This function is used to encrypt
+ *	     the supplied scatterlist containing the blocks of data. The crypto
+ *	     API consumer is responsible for aligning the entries of the
+ *	     scatterlist properly and making sure the chunks are correctly
+ *	     sized. In case a software fallback was put in place in the
+ *	     @cra_init call, this function might need to use the fallback if
+ *	     the algorithm doesn't support all of the key sizes. In case the
+ *	     key was stored in transformation context, the key might need to be
+ *	     re-programmed into the hardware in this function. This function
+ *	     shall not modify the transformation context, as this function may
+ *	     be called in parallel with the same transformation object.
+ * @decrypt: Decrypt a single block. This is a reverse counterpart to @encrypt
+ *	     and the conditions are exactly the same.
+ * @givencrypt: Update the IV for encryption. With this function, a cipher
+ *	        implementation may provide the function on how to update the IV
+ *	        for encryption.
+ * @givdecrypt: Update the IV for decryption. This is the reverse of
+ *	        @givencrypt.
+ * @geniv: The transformation implementation may use an "IV generator" provided
+ *	   by the kernel crypto API. Several use cases have a predefined
+ *	   approach how IVs are to be updated. For such use cases, the kernel
+ *	   crypto API provides ready-to-use implementations that can be
+ *	   referenced with this variable.
+ * @ivsize: IV size applicable for transformation. The consumer must provide an
+ *	    IV of exactly that size to perform the encrypt or decrypt operation.
+ *
+ * All fields except @givencrypt, @givdecrypt, @geniv and @ivsize are
+ * mandatory and must be filled.
  */
 struct ablkcipher_alg {
 	int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
@@ -213,6 +287,32 @@ struct ablkcipher_alg {
 	unsigned int ivsize;
 };
 
+/**
+ * struct aead_alg - AEAD cipher definition
+ * @maxauthsize: Set the maximum authentication tag size supported by the
+ *		 transformation. A transformation may support smaller tag sizes.
+ *		 As the authentication tag is a message digest to ensure the
+ *		 integrity of the encrypted data, a consumer typically wants the
+ *		 largest authentication tag possible as defined by this
+ *		 variable.
+ * @setauthsize: Set authentication size for the AEAD transformation. This
+ *		 function is used to specify the consumer requested size of the
+ * 		 authentication tag to be either generated by the transformation
+ *		 during encryption or the size of the authentication tag to be
+ *		 supplied during the decryption operation. This function is also
+ *		 responsible for checking the authentication tag size for
+ *		 validity.
+ * @setkey: see struct ablkcipher_alg
+ * @encrypt: see struct ablkcipher_alg
+ * @decrypt: see struct ablkcipher_alg
+ * @givencrypt: see struct ablkcipher_alg
+ * @givdecrypt: see struct ablkcipher_alg
+ * @geniv: see struct ablkcipher_alg
+ * @ivsize: see struct ablkcipher_alg
+ *
+ * All fields except @givencrypt, @givdecrypt, @geniv and @ivsize are
+ * mandatory and must be filled.
+ */
 struct aead_alg {
 	int (*setkey)(struct crypto_aead *tfm, const u8 *key,
 	              unsigned int keylen);
@@ -228,6 +328,18 @@ struct aead_alg {
 	unsigned int maxauthsize;
 };
 
+/**
+ * struct blkcipher_alg - synchronous block cipher definition
+ * @min_keysize: see struct ablkcipher_alg
+ * @max_keysize: see struct ablkcipher_alg
+ * @setkey: see struct ablkcipher_alg
+ * @encrypt: see struct ablkcipher_alg
+ * @decrypt: see struct ablkcipher_alg
+ * @geniv: see struct ablkcipher_alg
+ * @ivsize: see struct ablkcipher_alg
+ *
+ * All fields except @geniv and @ivsize are mandatory and must be filled.
+ */
 struct blkcipher_alg {
 	int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
 	              unsigned int keylen);
@@ -245,6 +357,53 @@ struct blkcipher_alg {
 	unsigned int ivsize;
 };
 
+/**
+ * struct cipher_alg - single-block symmetric ciphers definition
+ * @cia_min_keysize: Minimum key size supported by the transformation. This is
+ *		     the smallest key length supported by this transformation
+ *		     algorithm. This must be set to one of the pre-defined
+ *		     values as this is not hardware specific. Possible values
+ *		     for this field can be found via git grep "_MIN_KEY_SIZE"
+ *		     include/crypto/
+ * @cia_max_keysize: Maximum key size supported by the transformation. This is
+ *		    the largest key length supported by this transformation
+ *		    algorithm. This must be set to one of the pre-defined values
+ *		    as this is not hardware specific. Possible values for this
+ *		    field can be found via git grep "_MAX_KEY_SIZE"
+ *		    include/crypto/
+ * @cia_setkey: Set key for the transformation. This function is used to either
+ *	        program a supplied key into the hardware or store the key in the
+ *	        transformation context for programming it later. Note that this
+ *	        function does modify the transformation context. This function
+ *	        can be called multiple times during the existence of the
+ *	        transformation object, so one must make sure the key is properly
+ *	        reprogrammed into the hardware. This function is also
+ *	        responsible for checking the key length for validity.
+ * @cia_encrypt: Encrypt a single block. This function is used to encrypt a
+ *		 single block of data, which must be @cra_blocksize big. This
+ *		 always operates on a full @cra_blocksize and it is not possible
+ *		 to encrypt a block of smaller size. The supplied buffers must
+ *		 therefore also be at least of @cra_blocksize size. Both the
+ *		 input and output buffers are always aligned to @cra_alignmask.
+ *		 In case either of the input or output buffer supplied by user
+ *		 of the crypto API is not aligned to @cra_alignmask, the crypto
+ *		 API will re-align the buffers. The re-alignment means that a
+ *		 new buffer will be allocated, the data will be copied into the
+ *		 new buffer, then the processing will happen on the new buffer,
+ *		 then the data will be copied back into the original buffer and
+ *		 finally the new buffer will be freed. In case a software
+ *		 fallback was put in place in the @cra_init call, this function
+ *		 might need to use the fallback if the algorithm doesn't support
+ *		 all of the key sizes. In case the key was stored in
+ *		 transformation context, the key might need to be re-programmed
+ *		 into the hardware in this function. This function shall not
+ *		 modify the transformation context, as this function may be
+ *		 called in parallel with the same transformation object.
+ * @cia_decrypt: Decrypt a single block. This is a reverse counterpart to
+ *		 @cia_encrypt, and the conditions are exactly the same.
+ *
+ * All fields are mandatory and must be filled.
+ */
 struct cipher_alg {
 	unsigned int cia_min_keysize;
 	unsigned int cia_max_keysize;
@@ -261,6 +420,25 @@ struct compress_alg {
 			      unsigned int slen, u8 *dst, unsigned int *dlen);
 };
 
+/**
+ * struct rng_alg - random number generator definition
+ * @rng_make_random: The function defined by this variable obtains a random
+ *		     number. The random number generator transform must generate
+ *		     the random number out of the context provided with this
+ *		     call.
+ * @rng_reset: Reset of the random number generator by clearing the entire state.
+ *	       With the invocation of this function call, the random number
+ *             generator shall completely reinitialize its state. If the random
+ *	       number generator requires a seed for setting up a new state,
+ *	       the seed must be provided by the consumer while invoking this
+ *	       function. The required size of the seed is defined with
+ *	       @seedsize.
+ * @seedsize: The seed size required for a random number generator
+ *	      initialization defined with this variable. Some random number
+ *	      generators like the SP800-90A DRBG do not require a seed, as the
+ *	      seeding is implemented internally without the need for support by
+ *	      the consumer. In this case, the seed size is set to zero.
+ */
 struct rng_alg {
 	int (*rng_make_random)(struct crypto_rng *tfm, u8 *rdata,
 			       unsigned int dlen);
@@ -277,6 +455,81 @@ struct rng_alg {
 #define cra_compress	cra_u.compress
 #define cra_rng		cra_u.rng
 
+/**
+ * struct crypto_alg - definition of a cryptographic cipher algorithm
+ * @cra_flags: Flags describing this transformation. See include/linux/crypto.h
+ *	       CRYPTO_ALG_* flags for the flags which go in here. Those are
+ *	       used for fine-tuning the description of the transformation
+ *	       algorithm.
+ * @cra_blocksize: Minimum block size of this transformation. The size in bytes
+ *		   of the smallest possible unit which can be transformed with
+ *		   this algorithm. The users must respect this value.
+ *		   In case of HASH transformation, it is possible for a smaller
+ *		   block than @cra_blocksize to be passed to the crypto API for
+ *		   transformation; in case of any other transformation type, an
+ * 		   error will be returned upon any attempt to transform smaller
+ *		   than @cra_blocksize chunks.
+ * @cra_ctxsize: Size of the operational context of the transformation. This
+ *		 value informs the kernel crypto API about the memory size
+ *		 needed to be allocated for the transformation context.
+ * @cra_alignmask: Alignment mask for the input and output data buffer. The data
+ *		   buffer containing the input data for the algorithm must be
+ *		   aligned to this alignment mask. The data buffer for the
+ *		   output data must be aligned to this alignment mask. Note that
+ *		   the Crypto API will do the re-alignment in software, but
+ *		   only under special conditions and there is a performance hit.
+ *		   The re-alignment happens at these occasions for different
+ *		   @cra_u types: cipher -- For both input data and output data
+ *		   buffer; ahash -- For output hash destination buf; shash --
+ *		   For output hash destination buf.
+ *		   This is needed on hardware which is flawed by design and
+ *		   cannot pick data from arbitrary addresses.
+ * @cra_priority: Priority of this transformation implementation. In case
+ *		  multiple transformations with same @cra_name are available to
+ *		  multiple transformations with the same @cra_name are available to
+ *		  the Crypto API, the kernel will use the one with the highest
+ * @cra_name: Generic name (usable by multiple implementations) of the
+ *	      transformation algorithm. This is the name of the transformation
+ *	      itself. This field is used by the kernel when looking up the
+ *	      providers of a particular transformation.
+ * @cra_driver_name: Unique name of the transformation provider. This is the
+ *		     name of the provider of the transformation. This can be any
+ *		     arbitrary value, but in the usual case, this contains the
+ *		     name of the chip or provider and the name of the
+ *		     transformation algorithm.
+ * @cra_type: Type of the cryptographic transformation. This is a pointer to
+ *	      struct crypto_type, which implements callbacks common for all
+ *	      transformation types. There are multiple options:
+ *	      &crypto_blkcipher_type, &crypto_ablkcipher_type,
+ *	      &crypto_ahash_type, &crypto_aead_type, &crypto_rng_type.
+ *	      This field might be empty. In that case, there are no common
+ *	      callbacks. This is the case for: cipher, compress, shash.
+ * @cra_u: Callbacks implementing the transformation. This is a union of
+ *	   multiple structures. Depending on the type of transformation selected
+ *	   by @cra_type and @cra_flags above, the associated structure must be
+ *	   filled with callbacks. This field might be empty. This is the case
+ *	   for ahash, shash.
+ * @cra_init: Initialize the cryptographic transformation object. This function
+ *	      is used to initialize the cryptographic transformation object.
+ *	      This function is called only once at the instantiation time, right
+ *	      after the transformation context was allocated. In case the
+ *	      cryptographic hardware has some special requirements which need to
+ *	      be handled by software, this function shall check for the precise
+ *	      requirement of the transformation and put any software fallbacks
+ *	      in place.
+ * @cra_exit: Deinitialize the cryptographic transformation object. This is a
+ *	      counterpart to @cra_init, used to remove various changes set in
+ *	      @cra_init.
+ * @cra_module: Owner of this transformation implementation. Set to THIS_MODULE
+ * @cra_list: internally used
+ * @cra_users: internally used
+ * @cra_refcnt: internally used
+ * @cra_destroy: internally used
+ *
+ * The struct crypto_alg describes a generic Crypto API algorithm and is common
+ * for all of the transformations. Any variable not documented here shall not
+ * be used by a cipher implementation as it is internal to the Crypto API.
+ */
 struct crypto_alg {
 	struct list_head cra_list;
 	struct list_head cra_users;
@@ -581,6 +834,50 @@ static inline u32 crypto_skcipher_mask(u32 mask)
 	return mask;
 }
 
+/**
+ * DOC: Asynchronous Block Cipher API
+ *
+ * The asynchronous block cipher API is used with the ciphers of type
+ * CRYPTO_ALG_TYPE_ABLKCIPHER (listed as type "ablkcipher" in /proc/crypto).
+ *
+ * Asynchronous cipher operations imply that the function invocation for a
+ * cipher request returns immediately, before the operation completes.
+ * The cipher request is scheduled as a separate kernel thread and therefore
+ * load-balanced on the different CPUs via the process scheduler. To allow
+ * the kernel crypto API to inform the caller about the completion of a cipher
+ * request, the caller must provide a callback function. That function is
+ * invoked with the cipher handle when the request completes.
+ *
+ * To support the asynchronous operation, additional information than just the
+ * cipher handle must be supplied to the kernel crypto API. That additional
+ * information is given by filling in the ablkcipher_request data structure.
+ *
+ * For the asynchronous block cipher API, the state is maintained with the tfm
+ * cipher handle. A single tfm can be used across multiple calls and in
+ * parallel. For asynchronous block cipher calls, context data supplied and
+ * only used by the caller can be referenced in the request data structure in
+ * addition to the IV used for the cipher request. The maintenance of such
+ * state information would be important for a crypto driver implementer to
+ * have, because when calling the callback function upon completion of the
+ * cipher operation, that callback function may need some information about
+ * which operation just finished if multiple operations were invoked in
+ * parallel. This
+ * state information is unused by the kernel crypto API.
+ */
+
+/**
+ * crypto_alloc_ablkcipher() - allocate asynchronous block cipher handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ *	      ablkcipher cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Allocate a cipher handle for an ablkcipher. The returned struct
+ * crypto_ablkcipher is the cipher handle that is required for any subsequent
+ * API invocation for that ablkcipher.
+ *
+ * Return: allocated cipher handle in case of success; IS_ERR() is true in case
+ *	   of an error, PTR_ERR() returns the error code.
+ */
 struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name,
 						  u32 type, u32 mask);
 
@@ -590,11 +887,25 @@ static inline struct crypto_tfm *crypto_ablkcipher_tfm(
 	return &tfm->base;
 }
 
+/**
+ * crypto_free_ablkcipher() - zeroize and free cipher handle
+ * @tfm: cipher handle to be freed
+ */
 static inline void crypto_free_ablkcipher(struct crypto_ablkcipher *tfm)
 {
 	crypto_free_tfm(crypto_ablkcipher_tfm(tfm));
 }
 
+/**
+ * crypto_has_ablkcipher() - Search for the availability of an ablkcipher.
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ *	      ablkcipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Return: true when the ablkcipher is known to the kernel crypto API; false
+ *	   otherwise
+ */
 static inline int crypto_has_ablkcipher(const char *alg_name, u32 type,
 					u32 mask)
 {
@@ -608,12 +919,31 @@ static inline struct ablkcipher_tfm *crypto_ablkcipher_crt(
 	return &crypto_ablkcipher_tfm(tfm)->crt_ablkcipher;
 }
 
+/**
+ * crypto_ablkcipher_ivsize() - obtain IV size
+ * @tfm: cipher handle
+ *
+ * The size of the IV for the ablkcipher referenced by the cipher handle is
+ * returned. This IV size may be zero if the cipher does not need an IV.
+ *
+ * Return: IV size in bytes
+ */
 static inline unsigned int crypto_ablkcipher_ivsize(
 	struct crypto_ablkcipher *tfm)
 {
 	return crypto_ablkcipher_crt(tfm)->ivsize;
 }
 
+/**
+ * crypto_ablkcipher_blocksize() - obtain block size of cipher
+ * @tfm: cipher handle
+ *
+ * The block size for the ablkcipher referenced with the cipher handle is
+ * returned. The caller may use that information to allocate appropriate
+ * memory for the data returned by the encryption or decryption operation
+ *
+ * Return: block size of cipher
+ */
 static inline unsigned int crypto_ablkcipher_blocksize(
 	struct crypto_ablkcipher *tfm)
 {
@@ -643,6 +973,22 @@ static inline void crypto_ablkcipher_clear_flags(struct crypto_ablkcipher *tfm,
 	crypto_tfm_clear_flags(crypto_ablkcipher_tfm(tfm), flags);
 }
 
+/**
+ * crypto_ablkcipher_setkey() - set key for cipher
+ * @tfm: cipher handle
+ * @key: buffer holding the key
+ * @keylen: length of the key in bytes
+ *
+ * The caller provided key is set for the ablkcipher referenced by the cipher
+ * handle.
+ *
+ * Note, the key length determines the cipher type. Many block ciphers implement
+ * different cipher modes depending on the key size, such as AES-128 vs AES-192
+ * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
+ * is performed.
+ *
+ * Return: 0 if the setting of the key was successful; < 0 if an error occurred
+ */
 static inline int crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
 					   const u8 *key, unsigned int keylen)
 {
@@ -651,12 +997,32 @@ static inline int crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
 	return crt->setkey(crt->base, key, keylen);
 }
 
+/**
+ * crypto_ablkcipher_reqtfm() - obtain cipher handle from request
+ * @req: ablkcipher_request out of which the cipher handle is to be obtained
+ *
+ * Return the crypto_ablkcipher handle that is registered with an
+ * ablkcipher_request data structure.
+ *
+ * Return: crypto_ablkcipher handle
+ */
 static inline struct crypto_ablkcipher *crypto_ablkcipher_reqtfm(
 	struct ablkcipher_request *req)
 {
 	return __crypto_ablkcipher_cast(req->base.tfm);
 }
 
+/**
+ * crypto_ablkcipher_encrypt() - encrypt plaintext
+ * @req: reference to the ablkcipher_request handle that holds all information
+ *	 needed to perform the cipher operation
+ *
+ * Encrypt plaintext data using the ablkcipher_request handle. That data
+ * structure and how it is filled with data is discussed with the
+ * ablkcipher_request_* functions.
+ *
+ * Return: 0 if the cipher operation was successful; < 0 if an error occurred
+ */
 static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
 {
 	struct ablkcipher_tfm *crt =
@@ -664,6 +1030,17 @@ static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
 	return crt->encrypt(req);
 }
 
+/**
+ * crypto_ablkcipher_decrypt() - decrypt ciphertext
+ * @req: reference to the ablkcipher_request handle that holds all information
+ *	 needed to perform the cipher operation
+ *
+ * Decrypt ciphertext data using the ablkcipher_request handle. That data
+ * structure and how it is filled with data is discussed with the
+ * ablkcipher_request_* functions.
+ *
+ * Return: 0 if the cipher operation was successful; < 0 if an error occurred
+ */
 static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
 {
 	struct ablkcipher_tfm *crt =
@@ -671,12 +1048,37 @@ static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
 	return crt->decrypt(req);
 }
 
+/**
+ * DOC: Asynchronous Cipher Request Handle
+ *
+ * The ablkcipher_request data structure contains all pointers to data
+ * required for the asynchronous cipher operation. This includes the cipher
+ * handle (which can be used by multiple ablkcipher_request instances), pointer
+ * to plaintext and ciphertext, asynchronous callback function, etc. It acts
+ * as a handle to the ablkcipher_request_* API calls in a similar way as
+ * ablkcipher handle to the crypto_ablkcipher_* API calls.
+ */
+
+/**
+ * crypto_ablkcipher_reqsize() - obtain size of the request data structure
+ * @tfm: cipher handle
+ *
+ * Return: number of bytes
+ */
 static inline unsigned int crypto_ablkcipher_reqsize(
 	struct crypto_ablkcipher *tfm)
 {
 	return crypto_ablkcipher_crt(tfm)->reqsize;
 }
 
+/**
+ * ablkcipher_request_set_tfm() - update cipher handle reference in request
+ * @req: request handle to be modified
+ * @tfm: cipher handle that shall be added to the request handle
+ *
+ * Allow the caller to replace the existing ablkcipher handle in the request
+ * data structure with a different one.
+ */
 static inline void ablkcipher_request_set_tfm(
 	struct ablkcipher_request *req, struct crypto_ablkcipher *tfm)
 {
@@ -689,6 +1091,18 @@ static inline struct ablkcipher_request *ablkcipher_request_cast(
 	return container_of(req, struct ablkcipher_request, base);
 }
 
+/**
+ * ablkcipher_request_alloc() - allocate request data structure
+ * @tfm: cipher handle to be registered with the request
+ * @gfp: memory allocation flag that is handed to kmalloc by the API call.
+ *
+ * Allocate the request data structure that must be used with the ablkcipher
+ * encrypt and decrypt API calls. During the allocation, the provided ablkcipher
+ * handle is registered in the request data structure.
+ *
+ * Return: allocated request handle in case of success, or NULL if out of memory
+ */
 static inline struct ablkcipher_request *ablkcipher_request_alloc(
 	struct crypto_ablkcipher *tfm, gfp_t gfp)
 {
@@ -703,11 +1117,40 @@ static inline struct ablkcipher_request *ablkcipher_request_alloc(
 	return req;
 }
 
+/**
+ * ablkcipher_request_free() - zeroize and free request data structure
+ * @req: request data structure cipher handle to be freed
+ */
 static inline void ablkcipher_request_free(struct ablkcipher_request *req)
 {
 	kzfree(req);
 }
 
+/**
+ * ablkcipher_request_set_callback() - set asynchronous callback function
+ * @req: request handle
+ * @flags: specify zero or an ORing of the flags
+ *         CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
+ *	   increase the wait queue beyond the initial maximum size;
+ *	   CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
+ * @compl: callback function pointer to be registered with the request handle
+ * @data: The data pointer refers to memory that is not used by the kernel
+ *	  crypto API, but provided to the callback function for it to use. Here,
+ *	  the caller can provide a reference to memory the callback function can
+ *	  operate on. As the callback function is invoked asynchronously to the
+ *	  related functionality, it may need to access data structures of the
+ *	  related functionality which can be referenced using this pointer. The
+ *	  callback function can access the memory via the "data" field in the
+ *	  crypto_async_request data structure provided to the callback function.
+ *
+ * This function allows setting the callback function that is triggered once the
+ * cipher operation completes.
+ *
+ * The callback function is registered with the ablkcipher_request handle and
+ * must comply with the following template:
+ *
+ *	void callback_function(struct crypto_async_request *req, int error)
+ */
 static inline void ablkcipher_request_set_callback(
 	struct ablkcipher_request *req,
 	u32 flags, crypto_completion_t compl, void *data)
@@ -717,6 +1160,22 @@ static inline void ablkcipher_request_set_callback(
 	req->base.flags = flags;
 }
 
+/**
+ * ablkcipher_request_set_crypt() - set data buffers
+ * @req: request handle
+ * @src: source scatter / gather list
+ * @dst: destination scatter / gather list
+ * @nbytes: number of bytes to process from @src
+ * @iv: IV for the cipher operation which must comply with the IV size defined
+ *      by crypto_ablkcipher_ivsize
+ *
+ * This function allows setting of the source data and destination data
+ * scatter / gather lists.
+ *
+ * For encryption, the source is treated as the plaintext and the
+ * destination is the ciphertext. For a decryption operation, the use is
+ * reversed: the source is the ciphertext and the destination is the plaintext.
+ */
 static inline void ablkcipher_request_set_crypt(
 	struct ablkcipher_request *req,
 	struct scatterlist *src, struct scatterlist *dst,
@@ -728,11 +1187,55 @@ static inline void ablkcipher_request_set_crypt(
 	req->info = iv;
 }
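+
+/*
+ * A minimal usage sketch for the asynchronous block cipher API: encrypt one
+ * block-aligned buffer in place. The cipher name "cbc(aes)" and the
+ * simplified completion handling are illustrative assumptions; @len must be
+ * a multiple of the block size, @iv must hold crypto_ablkcipher_ivsize()
+ * bytes, and <linux/scatterlist.h> plus <linux/completion.h> are assumed to
+ * be included.
+ *
+ *	static void example_ablk_done(struct crypto_async_request *req, int err)
+ *	{
+ *		if (err != -EINPROGRESS)
+ *			complete(req->data);
+ *	}
+ *
+ *	static int example_cbc_aes_encrypt(u8 *buf, unsigned int len,
+ *					   const u8 *key, unsigned int keylen,
+ *					   u8 *iv)
+ *	{
+ *		struct crypto_ablkcipher *tfm;
+ *		struct ablkcipher_request *req;
+ *		struct scatterlist sg;
+ *		DECLARE_COMPLETION_ONSTACK(done);
+ *		int ret;
+ *
+ *		tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
+ *		if (IS_ERR(tfm))
+ *			return PTR_ERR(tfm);
+ *
+ *		ret = crypto_ablkcipher_setkey(tfm, key, keylen);
+ *		if (ret)
+ *			goto out_free_tfm;
+ *
+ *		req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
+ *		if (!req) {
+ *			ret = -ENOMEM;
+ *			goto out_free_tfm;
+ *		}
+ *
+ *		sg_init_one(&sg, buf, len);
+ *		ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ *						example_ablk_done, &done);
+ *		ablkcipher_request_set_crypt(req, &sg, &sg, len, iv);
+ *
+ *		ret = crypto_ablkcipher_encrypt(req);
+ *		if (ret == -EINPROGRESS || ret == -EBUSY) {
+ *			wait_for_completion(&done);
+ *			ret = 0;
+ *		}
+ *
+ *		ablkcipher_request_free(req);
+ *	out_free_tfm:
+ *		crypto_free_ablkcipher(tfm);
+ *		return ret;
+ *	}
+ */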
 
+/**
+ * DOC: Authenticated Encryption With Associated Data (AEAD) Cipher API
+ *
+ * The AEAD cipher API is used with the ciphers of type CRYPTO_ALG_TYPE_AEAD
+ * (listed as type "aead" in /proc/crypto)
+ *
+ * The most prominent examples for this type of encryption is GCM and CCM.
+ * However, the kernel supports other types of AEAD ciphers which are defined
+ * with the following cipher string:
+ *
+ *	authenc(keyed message digest, block cipher)
+ *
+ * For example: authenc(hmac(sha256), cbc(aes))
+ *
+ * The example code provided for the asynchronous block cipher operation
+ * applies here as well. Naturally all *ablkcipher* symbols must be exchanged
+ * for the *aead* pendants discussed in the following. In addition, for the AEAD
+ * operation, the aead_request_set_assoc function must be used to set the
+ * pointer to the associated data memory location before performing the
+ * encryption or decryption operation. In case of an encryption, the associated
+ * data memory is filled during the encryption operation. For decryption, the
+ * associated data memory must contain data that is used to verify the integrity
+ * of the decrypted data. Another deviation from the asynchronous block cipher
+ * operation is that the caller should explicitly check for -EBADMSG of the
+ * crypto_aead_decrypt. That error indicates an authentication error, i.e.
+ * a breach in the integrity of the message. In essence, that -EBADMSG error
+ * code is the key bonus an AEAD cipher has over "standard" block chaining
+ * modes.
+ */
+
 static inline struct crypto_aead *__crypto_aead_cast(struct crypto_tfm *tfm)
 {
 	return (struct crypto_aead *)tfm;
 }
 
+/**
+ * crypto_alloc_aead() - allocate AEAD cipher handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ *	     AEAD cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Allocate a cipher handle for an AEAD. The returned struct
+ * crypto_aead is the cipher handle that is required for any subsequent
+ * API invocation for that AEAD.
+ *
+ * Return: allocated cipher handle in case of success; IS_ERR() is true in case
+ *	   of an error, PTR_ERR() returns the error code.
+ */
 struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask);
 
 static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm)
@@ -740,6 +1243,10 @@ static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm)
 	return &tfm->base;
 }
 
+/**
+ * crypto_free_aead() - zeroize and free aead handle
+ * @tfm: cipher handle to be freed
+ */
 static inline void crypto_free_aead(struct crypto_aead *tfm)
 {
 	crypto_free_tfm(crypto_aead_tfm(tfm));
@@ -750,16 +1257,47 @@ static inline struct aead_tfm *crypto_aead_crt(struct crypto_aead *tfm)
 	return &crypto_aead_tfm(tfm)->crt_aead;
 }
 
+/**
+ * crypto_aead_ivsize() - obtain IV size
+ * @tfm: cipher handle
+ *
+ * The size of the IV for the aead referenced by the cipher handle is
+ * returned. This IV size may be zero if the cipher does not need an IV.
+ *
+ * Return: IV size in bytes
+ */
 static inline unsigned int crypto_aead_ivsize(struct crypto_aead *tfm)
 {
 	return crypto_aead_crt(tfm)->ivsize;
 }
 
+/**
+ * crypto_aead_authsize() - obtain maximum authentication data size
+ * @tfm: cipher handle
+ *
+ * The maximum size of the authentication data for the AEAD cipher referenced
+ * by the AEAD cipher handle is returned. The authentication data size may be
+ * zero if the cipher implements a hard-coded maximum.
+ *
+ * The authentication data may also be known as "tag value".
+ *
+ * Return: authentication data size / tag size in bytes
+ */
 static inline unsigned int crypto_aead_authsize(struct crypto_aead *tfm)
 {
 	return crypto_aead_crt(tfm)->authsize;
 }
 
+/**
+ * crypto_aead_blocksize() - obtain block size of cipher
+ * @tfm: cipher handle
+ *
+ * The block size for the AEAD referenced with the cipher handle is returned.
+ * The caller may use that information to allocate appropriate memory for the
+ * data returned by the encryption or decryption operation.
+ *
+ * Return: block size of cipher
+ */
 static inline unsigned int crypto_aead_blocksize(struct crypto_aead *tfm)
 {
 	return crypto_tfm_alg_blocksize(crypto_aead_tfm(tfm));
@@ -785,6 +1323,22 @@ static inline void crypto_aead_clear_flags(struct crypto_aead *tfm, u32 flags)
 	crypto_tfm_clear_flags(crypto_aead_tfm(tfm), flags);
 }
 
+/**
+ * crypto_aead_setkey() - set key for cipher
+ * @tfm: cipher handle
+ * @key: buffer holding the key
+ * @keylen: length of the key in bytes
+ *
+ * The caller provided key is set for the AEAD referenced by the cipher
+ * handle.
+ *
+ * Note, the key length determines the cipher type. Many block ciphers implement
+ * different cipher modes depending on the key size, such as AES-128 vs AES-192
+ * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
+ * is performed.
+ *
+ * Return: 0 if the setting of the key was successful; < 0 if an error occurred
+ */
 static inline int crypto_aead_setkey(struct crypto_aead *tfm, const u8 *key,
 				     unsigned int keylen)
 {
@@ -793,6 +1347,16 @@ static inline int crypto_aead_setkey(struct crypto_aead *tfm, const u8 *key,
 	return crt->setkey(crt->base, key, keylen);
 }
 
+/**
+ * crypto_aead_setauthsize() - set authentication data size
+ * @tfm: cipher handle
+ * @authsize: size of the authentication data / tag in bytes
+ *
+ * Set the authentication data size / tag size. AEAD requires an authentication
+ * tag (or MAC) in addition to the associated data.
+ *
+ * Return: 0 if the setting of the key was successful; < 0 if an error occurred
+ */
 int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize);
 
 static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req)
@@ -800,27 +1364,105 @@ static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req)
 	return __crypto_aead_cast(req->base.tfm);
 }
 
+/**
+ * crypto_aead_encrypt() - encrypt plaintext
+ * @req: reference to the aead_request handle that holds all information
+ *	 needed to perform the cipher operation
+ *
+ * Encrypt plaintext data using the aead_request handle. That data structure
+ * and how it is filled with data is discussed with the aead_request_*
+ * functions.
+ *
+ * IMPORTANT NOTE: The encryption operation creates the authentication data /
+ *		  tag. That data is concatenated with the created ciphertext.
+ *		  The ciphertext memory size is therefore the given number of
+ *		  block cipher blocks + the size defined by the
+ *		  crypto_aead_setauthsize invocation. The caller must ensure
+ *		  that sufficient memory is available for the ciphertext and
+ *		  the authentication tag.
+ *
+ * Return: 0 if the cipher operation was successful; < 0 if an error occurred
+ */
 static inline int crypto_aead_encrypt(struct aead_request *req)
 {
 	return crypto_aead_crt(crypto_aead_reqtfm(req))->encrypt(req);
 }
 
+/**
+ * crypto_aead_decrypt() - decrypt ciphertext
+ * @req: reference to the aead_request handle that holds all information
+ *	 needed to perform the cipher operation
+ *
+ * Decrypt ciphertext data using the aead_request handle. That data structure
+ * and how it is filled with data is discussed with the aead_request_*
+ * functions.
+ *
+ * IMPORTANT NOTE: The caller must concatenate the ciphertext followed by the
+ *		  authentication data / tag. That authentication data / tag
+ *		  must have the size defined by the crypto_aead_setauthsize
+ *		  invocation.
+ *
+ * Return: 0 if the cipher operation was successful; -EBADMSG: The AEAD
+ *	   cipher operation performs the authentication of the data during the
+ *	   decryption operation. Therefore, the function returns this error if
+ *	   the authentication of the ciphertext was unsuccessful (i.e. the
+ *	   integrity of the ciphertext or the associated data was violated);
+ *	   < 0 if an error occurred.
+ */
 static inline int crypto_aead_decrypt(struct aead_request *req)
 {
 	return crypto_aead_crt(crypto_aead_reqtfm(req))->decrypt(req);
 }
 
+/**
+ * DOC: Asynchronous AEAD Request Handle
+ *
+ * The aead_request data structure contains all pointers to data required for
+ * the AEAD cipher operation. This includes the cipher handle (which can be
+ * used by multiple aead_request instances), pointer to plaintext and
+ * ciphertext, asynchronous callback function, etc. It acts as a handle to the
+ * aead_request_* API calls in a similar way as the AEAD handle serves the
+ * crypto_aead_* API calls.
+ */
+
+/**
+ * crypto_aead_reqsize() - obtain size of the request data structure
+ * @tfm: cipher handle
+ *
+ * Return: number of bytes
+ */
 static inline unsigned int crypto_aead_reqsize(struct crypto_aead *tfm)
 {
 	return crypto_aead_crt(tfm)->reqsize;
 }
 
+/**
+ * aead_request_set_tfm() - update cipher handle reference in request
+ * @req: request handle to be modified
+ * @tfm: cipher handle that shall be added to the request handle
+ *
+ * Allow the caller to replace the existing aead handle in the request
+ * data structure with a different one.
+ */
 static inline void aead_request_set_tfm(struct aead_request *req,
 					struct crypto_aead *tfm)
 {
 	req->base.tfm = crypto_aead_tfm(crypto_aead_crt(tfm)->base);
 }
 
+/**
+ * aead_request_alloc() - allocate request data structure
+ * @tfm: cipher handle to be registered with the request
+ * @gfp: memory allocation flag that is handed to kmalloc by the API call.
+ *
+ * Allocate the request data structure that must be used with the AEAD
+ * encrypt and decrypt API calls. During the allocation, the provided aead
+ * handle is registered in the request data structure.
+ *
+ * Return: allocated request handle in case of success; IS_ERR() is true in case
+ *	   of an error, PTR_ERR() returns the error code.
+ */
 static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm,
 						      gfp_t gfp)
 {
@@ -834,11 +1476,40 @@ static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm,
 	return req;
 }
 
+/**
+ * aead_request_free() - zeroize and free request data structure
+ * @req: request data structure to be freed
+ */
 static inline void aead_request_free(struct aead_request *req)
 {
 	kzfree(req);
 }
 
+/**
+ * aead_request_set_callback() - set asynchronous callback function
+ * @req: request handle
+ * @flags: specify zero or an ORing of the flags
+ *	   CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
+ *	   increase the wait queue beyond the initial maximum size;
+ *	   CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
+ * @compl: callback function pointer to be registered with the request handle
+ * @data: The data pointer refers to memory that is not used by the kernel
+ *	  crypto API, but provided to the callback function for it to use. Here,
+ *	  the caller can provide a reference to memory the callback function can
+ *	  operate on. As the callback function is invoked asynchronously to the
+ *	  related functionality, it may need to access data structures of the
+ *	  related functionality which can be referenced using this pointer. The
+ *	  callback function can access the memory via the "data" field in the
+ *	  crypto_async_request data structure provided to the callback function.
+ *
+ * Setting the callback function that is triggered once the cipher operation
+ * completes.
+ *
+ * The callback function is registered with the aead_request handle and
+ * must comply with the following template:
+ *
+ *	void callback_function(struct crypto_async_request *req, int error)
+ */
 static inline void aead_request_set_callback(struct aead_request *req,
 					     u32 flags,
 					     crypto_completion_t compl,
@@ -849,6 +1520,36 @@ static inline void aead_request_set_callback(struct aead_request *req,
 	req->base.flags = flags;
 }
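
The callback template above is typically paired with a completion object so
that an otherwise synchronous caller can wait for an asynchronous cipher to
finish. A minimal sketch, using the illustrative names my_result and
my_aead_done (they are not part of the API):

#include <linux/completion.h>
#include <linux/crypto.h>
#include <linux/errno.h>

struct my_result {
	struct completion completion;
	int err;
};

/* Complies with the required template:
 * void callback_function(struct crypto_async_request *req, int error)
 */
static void my_aead_done(struct crypto_async_request *req, int error)
{
	struct my_result *res = req->data;

	if (error == -EINPROGRESS)
		return;	/* a backlogged request was merely queued */
	res->err = error;
	complete(&res->completion);
}

/* The pointer handed in as @data is returned in req->data. */
static void register_my_callback(struct aead_request *req,
				 struct my_result *res)
{
	init_completion(&res->completion);
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				       CRYPTO_TFM_REQ_MAY_SLEEP,
				  my_aead_done, res);
}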
 
+/**
+ * aead_request_set_crypt() - set data buffers
+ * @req: request handle
+ * @src: source scatter / gather list
+ * @dst: destination scatter / gather list
+ * @cryptlen: number of bytes to process from @src
+ * @iv: IV for the cipher operation which must comply with the IV size defined
+ *      by crypto_aead_ivsize()
+ *
+ * Setting the source data and destination data scatter / gather lists.
+ *
+ * For encryption, the source is treated as the plaintext and the
+ * destination is the ciphertext. For a decryption operation, the use is
+ * reversed: the source is the ciphertext and the destination is the plaintext.
+ *
+ * IMPORTANT NOTE: AEAD requires an authentication tag (MAC). For decryption,
+ *		  the caller must concatenate the ciphertext followed by the
+ *		  authentication tag and provide the entire data stream to the
+ *		  decryption operation (i.e. the data length used for the
+ *		  initialization of the scatterlist and the data length for the
+ *		  decryption operation is identical). For encryption, however,
+ *		  the authentication tag is created while encrypting the data.
+ *		  The destination buffer must hold sufficient space for the
+ *		  ciphertext and the authentication tag while the encryption
+ *		  invocation must only point to the plaintext data size. The
+ *		  following code snippet illustrates the memory usage:
+ *		  buffer = kmalloc(ptbuflen + (enc ? authsize : 0), GFP_KERNEL);
+ *		  sg_init_one(&sg, buffer, ptbuflen + (enc ? authsize : 0));
+ *		  aead_request_set_crypt(req, &sg, &sg, ptbuflen, iv);
+ */
 static inline void aead_request_set_crypt(struct aead_request *req,
 					  struct scatterlist *src,
 					  struct scatterlist *dst,
@@ -860,6 +1561,15 @@ static inline void aead_request_set_crypt(struct aead_request *req,
 	req->iv = iv;
 }
 
+/**
+ * aead_request_set_assoc() - set the associated data scatter / gather list
+ * @req: request handle
+ * @assoc: associated data scatter / gather list
+ * @assoclen: number of bytes to process from @assoc
+ *
+ * For both encryption and decryption, the scatter / gather list must point to
+ * the associated data provided by the caller.
+ */
 static inline void aead_request_set_assoc(struct aead_request *req,
 					  struct scatterlist *assoc,
 					  unsigned int assoclen)
@@ -868,6 +1578,36 @@ static inline void aead_request_set_assoc(struct aead_request *req,
 	req->assoclen = assoclen;
 }
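
Pulling the crypto_aead_* and aead_request_* calls together, the following is
a rough sketch of one encryption flow with this interface, where the
associated data is supplied as a separate scatterlist. The cipher name
"gcm(aes)", the 16 byte tag size and the helper name are illustrative only,
error handling is abbreviated, and an asynchronous tfm would additionally
require a completion callback as sketched above:

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/types.h>

static int aead_encrypt_sketch(const u8 *key, unsigned int keylen,
			       const u8 *assocdata, unsigned int assoclen,
			       u8 *buf, unsigned int ptlen, u8 *iv)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	struct scatterlist sg, asg;
	unsigned int authsize = 16;	/* example tag size */
	int ret;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_aead_setkey(tfm, key, keylen);
	if (!ret)
		ret = crypto_aead_setauthsize(tfm, authsize);
	if (ret)
		goto out_tfm;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_tfm;
	}

	/* buf must provide ptlen + authsize bytes; the tag is appended. */
	sg_init_one(&sg, buf, ptlen + authsize);
	sg_init_one(&asg, assocdata, assoclen);

	/* No callback set: acceptable only for a synchronous tfm. */
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
	aead_request_set_crypt(req, &sg, &sg, ptlen, iv);
	aead_request_set_assoc(req, &asg, assoclen);

	/* An asynchronous tfm may return -EINPROGRESS or -EBUSY here. */
	ret = crypto_aead_encrypt(req);

	aead_request_free(req);
out_tfm:
	crypto_free_aead(tfm);
	return ret;
}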
 
+/**
+ * DOC: Synchronous Block Cipher API
+ *
+ * The synchronous block cipher API is used with the ciphers of type
+ * CRYPTO_ALG_TYPE_BLKCIPHER (listed as type "blkcipher" in /proc/crypto).
+ *
+ * Synchronous calls have a context in the tfm. But since a single tfm can be
+ * used by multiple calls and in parallel, this info should not be changeable
+ * (unless a lock is used). This applies, for example, to the symmetric key.
+ * However, the IV is changeable, so there is an iv field in the blkcipher_tfm
+ * structure for the synchronous blkcipher API. It is the only state info that
+ * can be kept for synchronous calls without using a big lock across a tfm.
+ *
+ * The block cipher API allows the use of a complete cipher, i.e. a cipher
+ * consisting of a template (a block chaining mode) and a single block cipher
+ * primitive (e.g. AES).
+ *
+ * The plaintext data buffer and the ciphertext data buffer are pointed to
+ * by using scatter/gather lists. The cipher operation is performed
+ * on all segments of the provided scatter/gather lists.
+ *
+ * The kernel crypto API supports a cipher operation "in-place" which means that
+ * the caller may provide the same scatter/gather list for the plaintext and
+ * cipher text. After the completion of the cipher operation, the plaintext
+ * data is replaced with the ciphertext data in case of an encryption and vice
+ * versa for a decryption. The caller must ensure that the scatter/gather lists
+ * for the output data point to sufficiently large buffers, i.e. multiples of
+ * the block size of the cipher.
+ */
+
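
As a rough illustration of the calls documented in the remainder of this
section, the following sketch performs an in-place CBC encryption. The cipher
name "cbc(aes)" and the helper name are examples only, and the data length is
assumed to be a multiple of the block size:

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/types.h>

static int cbc_encrypt_sketch(const u8 *key, unsigned int keylen,
			      const u8 *iv, u8 *data, unsigned int len)
{
	struct crypto_blkcipher *tfm;
	struct blkcipher_desc desc;
	struct scatterlist sg;
	int ret;

	/* The CRYPTO_ALG_ASYNC mask requests a synchronous implementation. */
	tfm = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_blkcipher_setkey(tfm, key, keylen);
	if (ret)
		goto out;

	/* The IV length must match crypto_blkcipher_ivsize(tfm). */
	crypto_blkcipher_set_iv(tfm, iv, crypto_blkcipher_ivsize(tfm));

	/* The descriptor may live on the stack. */
	desc.tfm = tfm;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	sg_init_one(&sg, data, len);
	ret = crypto_blkcipher_encrypt(&desc, &sg, &sg, len);
out:
	crypto_free_blkcipher(tfm);
	return ret;
}
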
 static inline struct crypto_blkcipher *__crypto_blkcipher_cast(
 	struct crypto_tfm *tfm)
 {
@@ -881,6 +1621,20 @@ static inline struct crypto_blkcipher *crypto_blkcipher_cast(
 	return __crypto_blkcipher_cast(tfm);
 }
 
+/**
+ * crypto_alloc_blkcipher() - allocate synchronous block cipher handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ *	      blkcipher cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Allocate a cipher handle for a block cipher. The returned struct
+ * crypto_blkcipher is the cipher handle that is required for any subsequent
+ * API invocation for that block cipher.
+ *
+ * Return: allocated cipher handle in case of success; IS_ERR() is true in case
+ *	   of an error, PTR_ERR() returns the error code.
+ */
 static inline struct crypto_blkcipher *crypto_alloc_blkcipher(
 	const char *alg_name, u32 type, u32 mask)
 {
@@ -897,11 +1651,25 @@ static inline struct crypto_tfm *crypto_blkcipher_tfm(
 	return &tfm->base;
 }
 
+/**
+ * crypto_free_blkcipher() - zeroize and free the block cipher handle
+ * @tfm: cipher handle to be freed
+ */
 static inline void crypto_free_blkcipher(struct crypto_blkcipher *tfm)
 {
 	crypto_free_tfm(crypto_blkcipher_tfm(tfm));
 }
 
+/**
+ * crypto_has_blkcipher() - Search for the availability of a block cipher
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ *	      block cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Return: true when the block cipher is known to the kernel crypto API; false
+ *	   otherwise
+ */
 static inline int crypto_has_blkcipher(const char *alg_name, u32 type, u32 mask)
 {
 	type &= ~CRYPTO_ALG_TYPE_MASK;
@@ -911,6 +1679,12 @@ static inline int crypto_has_blkcipher(const char *alg_name, u32 type, u32 mask)
 	return crypto_has_alg(alg_name, type, mask);
 }
 
+/**
+ * crypto_blkcipher_name() - return the name / cra_name from the cipher handle
+ * @tfm: cipher handle
+ *
+ * Return: The character string holding the name of the cipher
+ */
 static inline const char *crypto_blkcipher_name(struct crypto_blkcipher *tfm)
 {
 	return crypto_tfm_alg_name(crypto_blkcipher_tfm(tfm));
@@ -928,11 +1702,30 @@ static inline struct blkcipher_alg *crypto_blkcipher_alg(
 	return &crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher;
 }
 
+/**
+ * crypto_blkcipher_ivsize() - obtain IV size
+ * @tfm: cipher handle
+ *
+ * The size of the IV for the block cipher referenced by the cipher handle is
+ * returned. This IV size may be zero if the cipher does not need an IV.
+ *
+ * Return: IV size in bytes
+ */
 static inline unsigned int crypto_blkcipher_ivsize(struct crypto_blkcipher *tfm)
 {
 	return crypto_blkcipher_alg(tfm)->ivsize;
 }
 
+/**
+ * crypto_blkcipher_blocksize() - obtain block size of cipher
+ * @tfm: cipher handle
+ *
+ * The block size for the block cipher referenced with the cipher handle is
+ * returned. The caller may use that information to allocate appropriate
+ * memory for the data returned by the encryption or decryption operation.
+ *
+ * Return: block size of cipher
+ */
 static inline unsigned int crypto_blkcipher_blocksize(
 	struct crypto_blkcipher *tfm)
 {
@@ -962,6 +1755,22 @@ static inline void crypto_blkcipher_clear_flags(struct crypto_blkcipher *tfm,
 	crypto_tfm_clear_flags(crypto_blkcipher_tfm(tfm), flags);
 }
 
+/**
+ * crypto_blkcipher_setkey() - set key for cipher
+ * @tfm: cipher handle
+ * @key: buffer holding the key
+ * @keylen: length of the key in bytes
+ *
+ * The caller provided key is set for the block cipher referenced by the cipher
+ * handle.
+ *
+ * Note, the key length selects the cipher variant. Many block ciphers come in
+ * different variants depending on the key size, such as AES-128 vs. AES-192
+ * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
+ * is performed.
+ *
+ * Return: 0 if the setting of the key was successful; < 0 if an error occurred
+ */
 static inline int crypto_blkcipher_setkey(struct crypto_blkcipher *tfm,
 					  const u8 *key, unsigned int keylen)
 {
@@ -969,6 +1778,24 @@ static inline int crypto_blkcipher_setkey(struct crypto_blkcipher *tfm,
 						 key, keylen);
 }
 
+/**
+ * crypto_blkcipher_encrypt() - encrypt plaintext
+ * @desc: reference to the block cipher handle with meta data
+ * @dst: scatter/gather list that is filled by the cipher operation with the
+ *	ciphertext
+ * @src: scatter/gather list that holds the plaintext
+ * @nbytes: number of bytes of the plaintext to encrypt.
+ *
+ * Encrypt plaintext data using the IV set by the caller with a preceding
+ * call of crypto_blkcipher_set_iv.
+ *
+ * The blkcipher_desc data structure must be filled by the caller and can
+ * reside on the stack. The caller must fill desc as follows: desc.tfm is filled
+ * with the block cipher handle; desc.flags is filled with either
+ * CRYPTO_TFM_REQ_MAY_SLEEP or 0.
+ *
+ * Return: 0 if the cipher operation was successful; < 0 if an error occurred
+ */
 static inline int crypto_blkcipher_encrypt(struct blkcipher_desc *desc,
 					   struct scatterlist *dst,
 					   struct scatterlist *src,
@@ -978,6 +1805,25 @@ static inline int crypto_blkcipher_encrypt(struct blkcipher_desc *desc,
 	return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes);
 }
 
+/**
+ * crypto_blkcipher_encrypt_iv() - encrypt plaintext with dedicated IV
+ * @desc: reference to the block cipher handle with meta data
+ * @dst: scatter/gather list that is filled by the cipher operation with the
+ *	ciphertext
+ * @src: scatter/gather list that holds the plaintext
+ * @nbytes: number of bytes of the plaintext to encrypt.
+ *
+ * Encrypt plaintext data with the use of an IV that is solely used for this
+ * cipher operation. Any previously set IV is not used.
+ *
+ * The blkcipher_desc data structure must be filled by the caller and can
+ * reside on the stack. The caller must fill desc as follows: desc.tfm is filled
+ * with the block cipher handle; desc.info is filled with the IV to be used for
+ * the current operation; desc.flags is filled with either
+ * CRYPTO_TFM_REQ_MAY_SLEEP or 0.
+ *
+ * Return: 0 if the cipher operation was successful; < 0 if an error occurred
+ */
 static inline int crypto_blkcipher_encrypt_iv(struct blkcipher_desc *desc,
 					      struct scatterlist *dst,
 					      struct scatterlist *src,
@@ -986,6 +1832,23 @@ static inline int crypto_blkcipher_encrypt_iv(struct blkcipher_desc *desc,
 	return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes);
 }
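
In contrast to crypto_blkcipher_encrypt(), the IV for this call is passed
through desc.info. A brief sketch, assuming desc->tfm and the key have been
set up as shown earlier and using an illustrative helper name:

/* One-shot encryption with a caller-supplied IV; ivbuf must hold
 * crypto_blkcipher_ivsize(desc->tfm) bytes and may be modified.
 */
static int encrypt_with_iv_sketch(struct blkcipher_desc *desc,
				  struct scatterlist *sg,
				  unsigned int len, u8 *ivbuf)
{
	desc->info = ivbuf;	/* IV used for this operation only */
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
	return crypto_blkcipher_encrypt_iv(desc, sg, sg, len);
}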
 
+/**
+ * crypto_blkcipher_decrypt() - decrypt ciphertext
+ * @desc: reference to the block cipher handle with meta data
+ * @dst: scatter/gather list that is filled by the cipher operation with the
+ *	plaintext
+ * @src: scatter/gather list that holds the ciphertext
+ * @nbytes: number of bytes of the ciphertext to decrypt.
+ *
+ * Decrypt ciphertext data using the IV set by the caller with a preceding
+ * call of crypto_blkcipher_set_iv.
+ *
+ * The blkcipher_desc data structure must be filled by the caller as documented
+ * for the crypto_blkcipher_encrypt call above.
+ *
+ * Return: 0 if the cipher operation was successful; < 0 if an error occurred
+ */
 static inline int crypto_blkcipher_decrypt(struct blkcipher_desc *desc,
 					   struct scatterlist *dst,
 					   struct scatterlist *src,
@@ -995,6 +1858,22 @@ static inline int crypto_blkcipher_decrypt(struct blkcipher_desc *desc,
 	return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes);
 }
 
+/**
+ * crypto_blkcipher_decrypt_iv() - decrypt ciphertext with dedicated IV
+ * @desc: reference to the block cipher handle with meta data
+ * @dst: scatter/gather list that is filled by the cipher operation with the
+ *	plaintext
+ * @src: scatter/gather list that holds the ciphertext
+ * @nbytes: number of bytes of the ciphertext to decrypt.
+ *
+ * Decrypt ciphertext data with the use of an IV that is solely used for this
+ * cipher operation. Any previously set IV is not used.
+ *
+ * The blkcipher_desc data structure must be filled by the caller as documented
+ * for the crypto_blkcipher_encrypt_iv call above.
+ *
+ * Return: 0 if the cipher operation was successful; < 0 if an error occurred
+ */
 static inline int crypto_blkcipher_decrypt_iv(struct blkcipher_desc *desc,
 					      struct scatterlist *dst,
 					      struct scatterlist *src,
@@ -1003,18 +1882,54 @@ static inline int crypto_blkcipher_decrypt_iv(struct blkcipher_desc *desc,
 	return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes);
 }
 
+/**
+ * crypto_blkcipher_set_iv() - set IV for cipher
+ * @tfm: cipher handle
+ * @src: buffer holding the IV
+ * @len: length of the IV in bytes
+ *
+ * The caller provided IV is set for the block cipher referenced by the cipher
+ * handle.
+ */
 static inline void crypto_blkcipher_set_iv(struct crypto_blkcipher *tfm,
 					   const u8 *src, unsigned int len)
 {
 	memcpy(crypto_blkcipher_crt(tfm)->iv, src, len);
 }
 
+/**
+ * crypto_blkcipher_get_iv() - obtain IV from cipher
+ * @tfm: cipher handle
+ * @dst: buffer filled with the IV
+ * @len: length of the buffer dst
+ *
+ * The caller can obtain the IV set for the block cipher referenced by the
+ * cipher handle and store it into the user-provided buffer. If the buffer
+ * has insufficient space, the IV is truncated to fit the buffer.
+ */
 static inline void crypto_blkcipher_get_iv(struct crypto_blkcipher *tfm,
 					   u8 *dst, unsigned int len)
 {
 	memcpy(dst, crypto_blkcipher_crt(tfm)->iv, len);
 }
 
+/**
+ * DOC: Single Block Cipher API
+ *
+ * The single block cipher API is used with the ciphers of type
+ * CRYPTO_ALG_TYPE_CIPHER (listed as type "cipher" in /proc/crypto).
+ *
+ * Using the single block cipher API calls, operations with the basic cipher
+ * primitive can be implemented. These cipher primitives exclude any block
+ * chaining operations including IV handling.
+ *
+ * The purpose of this single block cipher API is to support the implementation
+ * of templates or other concepts that only need to perform the cipher operation
+ * on one block at a time. Templates invoke the underlying cipher primitive
+ * block-wise and process either the input or the output data of these cipher
+ * operations.
+ */
+
 static inline struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm)
 {
 	return (struct crypto_cipher *)tfm;
@@ -1026,6 +1941,20 @@ static inline struct crypto_cipher *crypto_cipher_cast(struct crypto_tfm *tfm)
 	return __crypto_cipher_cast(tfm);
 }
 
+/**
+ * crypto_alloc_cipher() - allocate single block cipher handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ *	     single block cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Allocate a cipher handle for a single block cipher. The returned struct
+ * crypto_cipher is the cipher handle that is required for any subsequent API
+ * invocation for that single block cipher.
+ *
+ * Return: allocated cipher handle in case of success; IS_ERR() is true in case
+ *	   of an error, PTR_ERR() returns the error code.
+ */
 static inline struct crypto_cipher *crypto_alloc_cipher(const char *alg_name,
 							u32 type, u32 mask)
 {
@@ -1041,11 +1970,25 @@ static inline struct crypto_tfm *crypto_cipher_tfm(struct crypto_cipher *tfm)
 	return &tfm->base;
 }
 
+/**
+ * crypto_free_cipher() - zeroize and free the single block cipher handle
+ * @tfm: cipher handle to be freed
+ */
 static inline void crypto_free_cipher(struct crypto_cipher *tfm)
 {
 	crypto_free_tfm(crypto_cipher_tfm(tfm));
 }
 
+/**
+ * crypto_has_cipher() - Search for the availability of a single block cipher
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ *	     single block cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Return: true when the single block cipher is known to the kernel crypto API;
+ *	   false otherwise
+ */
 static inline int crypto_has_cipher(const char *alg_name, u32 type, u32 mask)
 {
 	type &= ~CRYPTO_ALG_TYPE_MASK;
@@ -1060,6 +2003,16 @@ static inline struct cipher_tfm *crypto_cipher_crt(struct crypto_cipher *tfm)
 	return &crypto_cipher_tfm(tfm)->crt_cipher;
 }
 
+/**
+ * crypto_cipher_blocksize() - obtain block size for cipher
+ * @tfm: cipher handle
+ *
+ * The block size for the single block cipher referenced with the cipher handle
+ * tfm is returned. The caller may use that information to allocate appropriate
+ * memory for the data returned by the encryption or decryption operation.
+ *
+ * Return: block size of cipher
+ */
 static inline unsigned int crypto_cipher_blocksize(struct crypto_cipher *tfm)
 {
 	return crypto_tfm_alg_blocksize(crypto_cipher_tfm(tfm));
@@ -1087,6 +2040,22 @@ static inline void crypto_cipher_clear_flags(struct crypto_cipher *tfm,
 	crypto_tfm_clear_flags(crypto_cipher_tfm(tfm), flags);
 }
 
+/**
+ * crypto_cipher_setkey() - set key for cipher
+ * @tfm: cipher handle
+ * @key: buffer holding the key
+ * @keylen: length of the key in bytes
+ *
+ * The caller provided key is set for the single block cipher referenced by the
+ * cipher handle.
+ *
+ * Note, the key length selects the cipher variant. Many block ciphers come in
+ * different variants depending on the key size, such as AES-128 vs. AES-192
+ * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
+ * is performed.
+ *
+ * Return: 0 if the setting of the key was successful; < 0 if an error occurred
+ */
 static inline int crypto_cipher_setkey(struct crypto_cipher *tfm,
                                        const u8 *key, unsigned int keylen)
 {
@@ -1094,6 +2063,15 @@ static inline int crypto_cipher_setkey(struct crypto_cipher *tfm,
 						  key, keylen);
 }
 
+/**
+ * crypto_cipher_encrypt_one() - encrypt one block of plaintext
+ * @tfm: cipher handle
+ * @dst: points to the buffer that will be filled with the ciphertext
+ * @src: buffer holding the plaintext to be encrypted
+ *
+ * Invoke the encryption operation of one block. The caller must ensure that
+ * the plaintext and ciphertext buffers are at least one block in size.
+ */
 static inline void crypto_cipher_encrypt_one(struct crypto_cipher *tfm,
 					     u8 *dst, const u8 *src)
 {
@@ -1101,6 +2079,15 @@ static inline void crypto_cipher_encrypt_one(struct crypto_cipher *tfm,
 						dst, src);
 }
 
+/**
+ * crypto_cipher_decrypt_one() - decrypt one block of ciphertext
+ * @tfm: cipher handle
+ * @dst: points to the buffer that will be filled with the plaintext
+ * @src: buffer holding the ciphertext to be decrypted
+ *
+ * Invoke the decryption operation of one block. The caller must ensure that
+ * the plaintext and ciphertext buffers are at least one block in size.
+ */
 static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
 					     u8 *dst, const u8 *src)
 {
@@ -1108,6 +2095,13 @@ static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
 						dst, src);
 }
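
A template-style user of this API processes exactly one block per call. The
following sketch encrypts a single AES block; "aes" and the helper name are
examples only:

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/types.h>

/* in and out must each hold crypto_cipher_blocksize(tfm) bytes. */
static int aes_one_block_sketch(const u8 *key, unsigned int keylen,
				const u8 *in, u8 *out)
{
	struct crypto_cipher *tfm;
	int ret;

	tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_cipher_setkey(tfm, key, keylen);
	if (!ret)
		crypto_cipher_encrypt_one(tfm, out, in);

	crypto_free_cipher(tfm);
	return ret;
}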
 
+/**
+ * DOC: Synchronous Message Digest API
+ *
+ * The synchronous message digest API is used with the ciphers of type
+ * CRYPTO_ALG_TYPE_HASH (listed as type "hash" in /proc/crypto).
+ */
+
 static inline struct crypto_hash *__crypto_hash_cast(struct crypto_tfm *tfm)
 {
 	return (struct crypto_hash *)tfm;
@@ -1120,6 +2114,20 @@ static inline struct crypto_hash *crypto_hash_cast(struct crypto_tfm *tfm)
 	return __crypto_hash_cast(tfm);
 }
 
+/**
+ * crypto_alloc_hash() - allocate synchronous message digest handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ *	      message digest cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Allocate a cipher handle for a message digest. The returned struct
+ * crypto_hash is the cipher handle that is required for any subsequent
+ * API invocation for that message digest.
+ *
+ * Return: allocated cipher handle in case of success; IS_ERR() is true in case
+ *	   of an error, PTR_ERR() returns the error code.
+ */
 static inline struct crypto_hash *crypto_alloc_hash(const char *alg_name,
 						    u32 type, u32 mask)
 {
@@ -1136,11 +2144,25 @@ static inline struct crypto_tfm *crypto_hash_tfm(struct crypto_hash *tfm)
 	return &tfm->base;
 }
 
+/**
+ * crypto_free_hash() - zeroize and free message digest handle
+ * @tfm: cipher handle to be freed
+ */
 static inline void crypto_free_hash(struct crypto_hash *tfm)
 {
 	crypto_free_tfm(crypto_hash_tfm(tfm));
 }
 
+/**
+ * crypto_has_hash() - Search for the availability of a message digest
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ *	      message digest cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Return: true when the message digest cipher is known to the kernel crypto
+ *	   API; false otherwise
+ */
 static inline int crypto_has_hash(const char *alg_name, u32 type, u32 mask)
 {
 	type &= ~CRYPTO_ALG_TYPE_MASK;
@@ -1156,6 +2178,15 @@ static inline struct hash_tfm *crypto_hash_crt(struct crypto_hash *tfm)
 	return &crypto_hash_tfm(tfm)->crt_hash;
 }
 
+/**
+ * crypto_hash_blocksize() - obtain block size for message digest
+ * @tfm: cipher handle
+ *
+ * The block size for the message digest cipher referenced with the cipher
+ * handle is returned.
+ *
+ * Return: block size of cipher
+ */
 static inline unsigned int crypto_hash_blocksize(struct crypto_hash *tfm)
 {
 	return crypto_tfm_alg_blocksize(crypto_hash_tfm(tfm));
@@ -1166,6 +2197,15 @@ static inline unsigned int crypto_hash_alignmask(struct crypto_hash *tfm)
 	return crypto_tfm_alg_alignmask(crypto_hash_tfm(tfm));
 }
 
+/**
+ * crypto_hash_digestsize() - obtain message digest size
+ * @tfm: cipher handle
+ *
+ * The size of the message digest created by the message digest cipher
+ * referenced with the cipher handle is returned.
+ *
+ * Return: message digest size
+ */
 static inline unsigned int crypto_hash_digestsize(struct crypto_hash *tfm)
 {
 	return crypto_hash_crt(tfm)->digestsize;
@@ -1186,11 +2226,38 @@ static inline void crypto_hash_clear_flags(struct crypto_hash *tfm, u32 flags)
 	crypto_tfm_clear_flags(crypto_hash_tfm(tfm), flags);
 }
 
+/**
+ * crypto_hash_init() - (re)initialize message digest handle
+ * @desc: cipher request handle that is to be filled by the caller --
+ *	  desc.tfm is filled with the hash cipher handle;
+ *	  desc.flags is filled with either CRYPTO_TFM_REQ_MAY_SLEEP or 0.
+ *
+ * The call (re-)initializes the message digest referenced by the hash cipher
+ * request handle. Any potentially existing state created by previous
+ * operations is discarded.
+ *
+ * Return: 0 if the message digest initialization was successful; < 0 if an
+ *	   error occurred
+ */
 static inline int crypto_hash_init(struct hash_desc *desc)
 {
 	return crypto_hash_crt(desc->tfm)->init(desc);
 }
 
+/**
+ * crypto_hash_update() - add data to message digest for processing
+ * @desc: cipher request handle
+ * @sg: scatter / gather list pointing to the data to be added to the message
+ *      digest
+ * @nbytes: number of bytes to be processed from @sg
+ *
+ * Updates the message digest state of the cipher handle pointed to by the
+ * hash cipher request handle with the input data pointed to by the
+ * scatter/gather list.
+ *
+ * Return: 0 if the message digest update was successful; < 0 if an error
+ *	   occurred
+ */
 static inline int crypto_hash_update(struct hash_desc *desc,
 				     struct scatterlist *sg,
 				     unsigned int nbytes)
@@ -1198,11 +2265,39 @@ static inline int crypto_hash_update(struct hash_desc *desc,
 	return crypto_hash_crt(desc->tfm)->update(desc, sg, nbytes);
 }
 
+/**
+ * crypto_hash_final() - calculate message digest
+ * @desc: cipher request handle
+ * @out: message digest output buffer -- The caller must ensure that the out
+ *	 buffer has a sufficient size (e.g. by using the crypto_hash_digestsize
+ *	 function).
+ *
+ * Finalize the message digest operation and create the message digest
+ * based on all data added to the cipher handle. The message digest is placed
+ * into the output buffer.
+ *
+ * Return: 0 if the message digest creation was successful; < 0 if an error
+ *	   occurred
+ */
 static inline int crypto_hash_final(struct hash_desc *desc, u8 *out)
 {
 	return crypto_hash_crt(desc->tfm)->final(desc, out);
 }
 
+/**
+ * crypto_hash_digest() - calculate message digest for a buffer
+ * @desc: see crypto_hash_final()
+ * @sg: see crypto_hash_update()
+ * @nbytes: see crypto_hash_update()
+ * @out: see crypto_hash_final()
+ *
+ * This function is a "short-hand" for the function calls of crypto_hash_init,
+ * crypto_hash_update and crypto_hash_final. The parameters have the same
+ * meaning as discussed for those separate three functions.
+ *
+ * Return: 0 if the message digest creation was successful; < 0 if an error
+ *	   occurred
+ */
 static inline int crypto_hash_digest(struct hash_desc *desc,
 				     struct scatterlist *sg,
 				     unsigned int nbytes, u8 *out)
@@ -1210,6 +2305,17 @@ static inline int crypto_hash_digest(struct hash_desc *desc,
 	return crypto_hash_crt(desc->tfm)->digest(desc, sg, nbytes, out);
 }
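
The init/update/final triple (or the crypto_hash_digest() shorthand) is
typically combined with crypto_hash_setkey(), documented next, when a keyed
hash is used. A sketch with "hmac(sha256)" as an illustrative keyed hash;
drop the setkey call for an unkeyed digest:

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/types.h>

/* out must hold crypto_hash_digestsize(tfm) bytes. */
static int hmac_sketch(const u8 *key, unsigned int keylen,
		       const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_hash *tfm;
	struct hash_desc desc;
	struct scatterlist sg;
	int ret;

	tfm = crypto_alloc_hash("hmac(sha256)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_hash_setkey(tfm, key, keylen);
	if (ret)
		goto out;

	desc.tfm = tfm;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
	sg_init_one(&sg, data, len);

	ret = crypto_hash_init(&desc);
	if (!ret)
		ret = crypto_hash_update(&desc, &sg, len);
	if (!ret)
		ret = crypto_hash_final(&desc, out);
out:
	crypto_free_hash(tfm);
	return ret;
}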
 
+/**
+ * crypto_hash_setkey() - set key for message digest
+ * @hash: cipher handle
+ * @key: buffer holding the key
+ * @keylen: length of the key in bytes
+ *
+ * The caller provided key is set for the message digest cipher. The cipher
+ * handle must point to a keyed hash in order for this function to succeed.
+ *
+ * Return: 0 if the setting of the key was successful; < 0 if an error occurred
+ */
 static inline int crypto_hash_setkey(struct crypto_hash *hash,
 				     const u8 *key, unsigned int keylen)
 {
diff --git a/include/net/sock.h b/include/net/sock.h
index c3e83c9a8ab82f7dda0e3ed0e0f701bf9d6df362..2210fec65669c870384fe87f1d536eca34ab3586 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1593,6 +1593,7 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
 				     int *errcode, int max_page_order);
 void *sock_kmalloc(struct sock *sk, int size, gfp_t priority);
 void sock_kfree_s(struct sock *sk, void *mem, int size);
+void sock_kzfree_s(struct sock *sk, void *mem, int size);
 void sk_send_sigurg(struct sock *sk);
 
 /*
diff --git a/include/uapi/linux/if_alg.h b/include/uapi/linux/if_alg.h
index 0f9acce5b1ff196b5e7869f55d6716410d7d75da..f2acd2fde1f3abb93b1be11a54005ce10a57e54e 100644
--- a/include/uapi/linux/if_alg.h
+++ b/include/uapi/linux/if_alg.h
@@ -32,6 +32,8 @@ struct af_alg_iv {
 #define ALG_SET_KEY			1
 #define ALG_SET_IV			2
 #define ALG_SET_OP			3
+#define ALG_SET_AEAD_ASSOCLEN		4
+#define ALG_SET_AEAD_AUTHSIZE		5
 
 /* Operations */
 #define ALG_OP_DECRYPT			0
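
The two new flags are consumed by the AF_ALG user-space interface. The AEAD
socket handler itself is not part of this patch, so the snippet below is only
a hypothetical user-space sketch of the intended use: the tag length travels
in the option length of a setsockopt() call, while ALG_SET_AEAD_ASSOCLEN is
expected to be supplied as a SOL_ALG control message on sendmsg():

#include <stdint.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

#ifndef SOL_ALG
#define SOL_ALG 279
#endif

/* Hypothetical sketch: bind a "gcm(aes)" AEAD socket and configure it. */
static int setup_aead_socket(const uint8_t *key, int keylen, int taglen)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type = "aead",
		.salg_name = "gcm(aes)",
	};
	int tfmfd, opfd;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	if (tfmfd < 0)
		return -1;
	if (bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa)) < 0)
		return -1;

	if (setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, keylen) < 0)
		return -1;
	/* Tag length is conveyed via the option length; no payload. */
	if (setsockopt(tfmfd, SOL_ALG, ALG_SET_AEAD_AUTHSIZE, NULL, taglen) < 0)
		return -1;

	opfd = accept(tfmfd, NULL, NULL);	/* request socket */
	return opfd;
}
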
diff --git a/net/core/sock.c b/net/core/sock.c
index 9a56b2000c3f374fb95aedada3327447816a9512..1c7a33db1314f3e2a7ded154b9ea498023d2d2e4 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1731,18 +1731,34 @@ void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
 }
 EXPORT_SYMBOL(sock_kmalloc);
 
-/*
- * Free an option memory block.
+/* Free an option memory block. Note, we actually want the inline
+ * here as this allows gcc to detect the nullify and fold away the
+ * condition entirely.
  */
-void sock_kfree_s(struct sock *sk, void *mem, int size)
+static inline void __sock_kfree_s(struct sock *sk, void *mem, int size,
+				  const bool nullify)
 {
 	if (WARN_ON_ONCE(!mem))
 		return;
-	kfree(mem);
+	if (nullify)
+		kzfree(mem);
+	else
+		kfree(mem);
 	atomic_sub(size, &sk->sk_omem_alloc);
 }
+
+void sock_kfree_s(struct sock *sk, void *mem, int size)
+{
+	__sock_kfree_s(sk, mem, size, false);
+}
 EXPORT_SYMBOL(sock_kfree_s);
 
+void sock_kzfree_s(struct sock *sk, void *mem, int size)
+{
+	__sock_kfree_s(sk, mem, size, true);
+}
+EXPORT_SYMBOL(sock_kzfree_s);
+
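
A hedged usage sketch: sock_kzfree_s() is a drop-in replacement for
sock_kfree_s() wherever the per-socket option memory holds sensitive material
such as key data (the helper below is purely illustrative):

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <net/sock.h>

/* Illustrative only: per-socket key storage that is wiped on free. */
static int store_key_sketch(struct sock *sk, const u8 *key, unsigned int len)
{
	u8 *copy = sock_kmalloc(sk, len, GFP_KERNEL);

	if (!copy)
		return -ENOMEM;
	memcpy(copy, key, len);

	/* ... use the key ... */

	sock_kzfree_s(sk, copy, len);	/* zeroizes before freeing */
	return 0;
}
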
 /* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
    I think, these locks should be removed for datagram sockets.
  */