crypto: scatterwalk - Change scatterwalk_next calling convention
Rather than returning the address and storing the length into an argument pointer, add an address field to the walk struct and use that to store the address. The length is returned directly. Change the done functions to use this stored address instead of getting them from the caller. Split the address into two using a union. The user should only access the const version so that it is never changed. Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
This commit is contained in:
parent
b949f55644
commit
65775cf313
|
|
@ -460,11 +460,10 @@ static void gcm_calculate_auth_mac(struct aead_request *req, u64 dg[], u32 len)
|
||||||
|
|
||||||
do {
|
do {
|
||||||
unsigned int n;
|
unsigned int n;
|
||||||
const u8 *p;
|
|
||||||
|
|
||||||
p = scatterwalk_next(&walk, len, &n);
|
n = scatterwalk_next(&walk, len);
|
||||||
gcm_update_mac(dg, p, n, buf, &buf_count, ctx);
|
gcm_update_mac(dg, walk.addr, n, buf, &buf_count, ctx);
|
||||||
scatterwalk_done_src(&walk, p, n);
|
scatterwalk_done_src(&walk, n);
|
||||||
|
|
||||||
if (unlikely(len / SZ_4K > (len - n) / SZ_4K)) {
|
if (unlikely(len / SZ_4K > (len - n) / SZ_4K)) {
|
||||||
kernel_neon_end();
|
kernel_neon_end();
|
||||||
|
|
|
||||||
|
|
@ -157,12 +157,11 @@ static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[])
|
||||||
|
|
||||||
do {
|
do {
|
||||||
unsigned int n;
|
unsigned int n;
|
||||||
const u8 *p;
|
|
||||||
|
|
||||||
p = scatterwalk_next(&walk, len, &n);
|
n = scatterwalk_next(&walk, len);
|
||||||
macp = ce_aes_ccm_auth_data(mac, p, n, macp, ctx->key_enc,
|
macp = ce_aes_ccm_auth_data(mac, walk.addr, n, macp,
|
||||||
num_rounds(ctx));
|
ctx->key_enc, num_rounds(ctx));
|
||||||
scatterwalk_done_src(&walk, p, n);
|
scatterwalk_done_src(&walk, n);
|
||||||
len -= n;
|
len -= n;
|
||||||
} while (len);
|
} while (len);
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -309,11 +309,10 @@ static void gcm_calculate_auth_mac(struct aead_request *req, u64 dg[], u32 len)
|
||||||
|
|
||||||
do {
|
do {
|
||||||
unsigned int n;
|
unsigned int n;
|
||||||
const u8 *p;
|
|
||||||
|
|
||||||
p = scatterwalk_next(&walk, len, &n);
|
n = scatterwalk_next(&walk, len);
|
||||||
gcm_update_mac(dg, p, n, buf, &buf_count, ctx);
|
gcm_update_mac(dg, walk.addr, n, buf, &buf_count, ctx);
|
||||||
scatterwalk_done_src(&walk, p, n);
|
scatterwalk_done_src(&walk, n);
|
||||||
len -= n;
|
len -= n;
|
||||||
} while (len);
|
} while (len);
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -113,10 +113,10 @@ static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[])
|
||||||
|
|
||||||
do {
|
do {
|
||||||
unsigned int n, orig_n;
|
unsigned int n, orig_n;
|
||||||
const u8 *p, *orig_p;
|
const u8 *p;
|
||||||
|
|
||||||
orig_p = scatterwalk_next(&walk, assoclen, &orig_n);
|
orig_n = scatterwalk_next(&walk, assoclen);
|
||||||
p = orig_p;
|
p = walk.addr;
|
||||||
n = orig_n;
|
n = orig_n;
|
||||||
|
|
||||||
while (n > 0) {
|
while (n > 0) {
|
||||||
|
|
@ -149,7 +149,7 @@ static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[])
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
scatterwalk_done_src(&walk, orig_p, orig_n);
|
scatterwalk_done_src(&walk, orig_n);
|
||||||
assoclen -= orig_n;
|
assoclen -= orig_n;
|
||||||
} while (assoclen);
|
} while (assoclen);
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -83,10 +83,10 @@ static void gcm_calculate_auth_mac(struct aead_request *req, u8 ghash[])
|
||||||
|
|
||||||
do {
|
do {
|
||||||
unsigned int n, orig_n;
|
unsigned int n, orig_n;
|
||||||
const u8 *p, *orig_p;
|
const u8 *p;
|
||||||
|
|
||||||
orig_p = scatterwalk_next(&walk, assoclen, &orig_n);
|
orig_n = scatterwalk_next(&walk, assoclen);
|
||||||
p = orig_p;
|
p = walk.addr;
|
||||||
n = orig_n;
|
n = orig_n;
|
||||||
|
|
||||||
if (n + buflen < GHASH_BLOCK_SIZE) {
|
if (n + buflen < GHASH_BLOCK_SIZE) {
|
||||||
|
|
@ -118,7 +118,7 @@ static void gcm_calculate_auth_mac(struct aead_request *req, u8 ghash[])
|
||||||
memcpy(&buffer[0], p, buflen);
|
memcpy(&buffer[0], p, buflen);
|
||||||
}
|
}
|
||||||
|
|
||||||
scatterwalk_done_src(&walk, orig_p, orig_n);
|
scatterwalk_done_src(&walk, orig_n);
|
||||||
assoclen -= orig_n;
|
assoclen -= orig_n;
|
||||||
} while (assoclen);
|
} while (assoclen);
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -66,7 +66,6 @@ struct s390_xts_ctx {
|
||||||
struct gcm_sg_walk {
|
struct gcm_sg_walk {
|
||||||
struct scatter_walk walk;
|
struct scatter_walk walk;
|
||||||
unsigned int walk_bytes;
|
unsigned int walk_bytes;
|
||||||
u8 *walk_ptr;
|
|
||||||
unsigned int walk_bytes_remain;
|
unsigned int walk_bytes_remain;
|
||||||
u8 buf[AES_BLOCK_SIZE];
|
u8 buf[AES_BLOCK_SIZE];
|
||||||
unsigned int buf_bytes;
|
unsigned int buf_bytes;
|
||||||
|
|
@ -789,8 +788,7 @@ static inline unsigned int _gcm_sg_clamp_and_map(struct gcm_sg_walk *gw)
|
||||||
{
|
{
|
||||||
if (gw->walk_bytes_remain == 0)
|
if (gw->walk_bytes_remain == 0)
|
||||||
return 0;
|
return 0;
|
||||||
gw->walk_ptr = scatterwalk_next(&gw->walk, gw->walk_bytes_remain,
|
gw->walk_bytes = scatterwalk_next(&gw->walk, gw->walk_bytes_remain);
|
||||||
&gw->walk_bytes);
|
|
||||||
return gw->walk_bytes;
|
return gw->walk_bytes;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -799,10 +797,9 @@ static inline void _gcm_sg_unmap_and_advance(struct gcm_sg_walk *gw,
|
||||||
{
|
{
|
||||||
gw->walk_bytes_remain -= nbytes;
|
gw->walk_bytes_remain -= nbytes;
|
||||||
if (out)
|
if (out)
|
||||||
scatterwalk_done_dst(&gw->walk, gw->walk_ptr, nbytes);
|
scatterwalk_done_dst(&gw->walk, nbytes);
|
||||||
else
|
else
|
||||||
scatterwalk_done_src(&gw->walk, gw->walk_ptr, nbytes);
|
scatterwalk_done_src(&gw->walk, nbytes);
|
||||||
gw->walk_ptr = NULL;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static int gcm_in_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
|
static int gcm_in_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
|
||||||
|
|
@ -828,14 +825,14 @@ static int gcm_in_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!gw->buf_bytes && gw->walk_bytes >= minbytesneeded) {
|
if (!gw->buf_bytes && gw->walk_bytes >= minbytesneeded) {
|
||||||
gw->ptr = gw->walk_ptr;
|
gw->ptr = gw->walk.addr;
|
||||||
gw->nbytes = gw->walk_bytes;
|
gw->nbytes = gw->walk_bytes;
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
|
|
||||||
while (1) {
|
while (1) {
|
||||||
n = min(gw->walk_bytes, AES_BLOCK_SIZE - gw->buf_bytes);
|
n = min(gw->walk_bytes, AES_BLOCK_SIZE - gw->buf_bytes);
|
||||||
memcpy(gw->buf + gw->buf_bytes, gw->walk_ptr, n);
|
memcpy(gw->buf + gw->buf_bytes, gw->walk.addr, n);
|
||||||
gw->buf_bytes += n;
|
gw->buf_bytes += n;
|
||||||
_gcm_sg_unmap_and_advance(gw, n, false);
|
_gcm_sg_unmap_and_advance(gw, n, false);
|
||||||
if (gw->buf_bytes >= minbytesneeded) {
|
if (gw->buf_bytes >= minbytesneeded) {
|
||||||
|
|
@ -869,13 +866,13 @@ static int gcm_out_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
|
||||||
}
|
}
|
||||||
|
|
||||||
if (gw->walk_bytes >= minbytesneeded) {
|
if (gw->walk_bytes >= minbytesneeded) {
|
||||||
gw->ptr = gw->walk_ptr;
|
gw->ptr = gw->walk.addr;
|
||||||
gw->nbytes = gw->walk_bytes;
|
gw->nbytes = gw->walk_bytes;
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
|
|
||||||
scatterwalk_unmap(gw->walk_ptr);
|
scatterwalk_unmap(gw->walk.addr);
|
||||||
gw->walk_ptr = NULL;
|
|
||||||
|
|
||||||
gw->ptr = gw->buf;
|
gw->ptr = gw->buf;
|
||||||
gw->nbytes = sizeof(gw->buf);
|
gw->nbytes = sizeof(gw->buf);
|
||||||
|
|
@ -914,7 +911,7 @@ static int gcm_out_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
|
||||||
if (!_gcm_sg_clamp_and_map(gw))
|
if (!_gcm_sg_clamp_and_map(gw))
|
||||||
return i;
|
return i;
|
||||||
n = min(gw->walk_bytes, bytesdone - i);
|
n = min(gw->walk_bytes, bytesdone - i);
|
||||||
memcpy(gw->walk_ptr, gw->buf + i, n);
|
memcpy(gw->walk.addr, gw->buf + i, n);
|
||||||
_gcm_sg_unmap_and_advance(gw, n, true);
|
_gcm_sg_unmap_and_advance(gw, n, true);
|
||||||
}
|
}
|
||||||
} else
|
} else
|
||||||
|
|
|
||||||
|
|
@ -71,10 +71,9 @@ static void crypto_aegis128_aesni_process_ad(
|
||||||
|
|
||||||
scatterwalk_start(&walk, sg_src);
|
scatterwalk_start(&walk, sg_src);
|
||||||
while (assoclen != 0) {
|
while (assoclen != 0) {
|
||||||
unsigned int size;
|
unsigned int size = scatterwalk_next(&walk, assoclen);
|
||||||
const u8 *mapped = scatterwalk_next(&walk, assoclen, &size);
|
const u8 *src = walk.addr;
|
||||||
unsigned int left = size;
|
unsigned int left = size;
|
||||||
const u8 *src = mapped;
|
|
||||||
|
|
||||||
if (pos + size >= AEGIS128_BLOCK_SIZE) {
|
if (pos + size >= AEGIS128_BLOCK_SIZE) {
|
||||||
if (pos > 0) {
|
if (pos > 0) {
|
||||||
|
|
@ -97,7 +96,7 @@ static void crypto_aegis128_aesni_process_ad(
|
||||||
pos += left;
|
pos += left;
|
||||||
assoclen -= size;
|
assoclen -= size;
|
||||||
|
|
||||||
scatterwalk_done_src(&walk, mapped, size);
|
scatterwalk_done_src(&walk, size);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (pos > 0) {
|
if (pos > 0) {
|
||||||
|
|
|
||||||
|
|
@ -1306,12 +1306,11 @@ static void gcm_process_assoc(const struct aes_gcm_key *key, u8 ghash_acc[16],
|
||||||
scatterwalk_start(&walk, sg_src);
|
scatterwalk_start(&walk, sg_src);
|
||||||
|
|
||||||
while (assoclen) {
|
while (assoclen) {
|
||||||
unsigned int orig_len_this_step;
|
unsigned int orig_len_this_step = scatterwalk_next(
|
||||||
const u8 *orig_src = scatterwalk_next(&walk, assoclen,
|
&walk, assoclen);
|
||||||
&orig_len_this_step);
|
|
||||||
unsigned int len_this_step = orig_len_this_step;
|
unsigned int len_this_step = orig_len_this_step;
|
||||||
unsigned int len;
|
unsigned int len;
|
||||||
const u8 *src = orig_src;
|
const u8 *src = walk.addr;
|
||||||
|
|
||||||
if (unlikely(pos)) {
|
if (unlikely(pos)) {
|
||||||
len = min(len_this_step, 16 - pos);
|
len = min(len_this_step, 16 - pos);
|
||||||
|
|
@ -1335,7 +1334,7 @@ static void gcm_process_assoc(const struct aes_gcm_key *key, u8 ghash_acc[16],
|
||||||
pos = len_this_step;
|
pos = len_this_step;
|
||||||
}
|
}
|
||||||
next:
|
next:
|
||||||
scatterwalk_done_src(&walk, orig_src, orig_len_this_step);
|
scatterwalk_done_src(&walk, orig_len_this_step);
|
||||||
if (need_resched()) {
|
if (need_resched()) {
|
||||||
kernel_fpu_end();
|
kernel_fpu_end();
|
||||||
kernel_fpu_begin();
|
kernel_fpu_begin();
|
||||||
|
|
|
||||||
|
|
@ -284,10 +284,9 @@ static void crypto_aegis128_process_ad(struct aegis_state *state,
|
||||||
|
|
||||||
scatterwalk_start(&walk, sg_src);
|
scatterwalk_start(&walk, sg_src);
|
||||||
while (assoclen != 0) {
|
while (assoclen != 0) {
|
||||||
unsigned int size;
|
unsigned int size = scatterwalk_next(&walk, assoclen);
|
||||||
const u8 *mapped = scatterwalk_next(&walk, assoclen, &size);
|
const u8 *src = walk.addr;
|
||||||
unsigned int left = size;
|
unsigned int left = size;
|
||||||
const u8 *src = mapped;
|
|
||||||
|
|
||||||
if (pos + size >= AEGIS_BLOCK_SIZE) {
|
if (pos + size >= AEGIS_BLOCK_SIZE) {
|
||||||
if (pos > 0) {
|
if (pos > 0) {
|
||||||
|
|
@ -308,7 +307,7 @@ static void crypto_aegis128_process_ad(struct aegis_state *state,
|
||||||
|
|
||||||
pos += left;
|
pos += left;
|
||||||
assoclen -= size;
|
assoclen -= size;
|
||||||
scatterwalk_done_src(&walk, mapped, size);
|
scatterwalk_done_src(&walk, size);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (pos > 0) {
|
if (pos > 0) {
|
||||||
|
|
|
||||||
|
|
@ -34,12 +34,11 @@ inline void memcpy_from_scatterwalk(void *buf, struct scatter_walk *walk,
|
||||||
unsigned int nbytes)
|
unsigned int nbytes)
|
||||||
{
|
{
|
||||||
do {
|
do {
|
||||||
const void *src_addr;
|
|
||||||
unsigned int to_copy;
|
unsigned int to_copy;
|
||||||
|
|
||||||
src_addr = scatterwalk_next(walk, nbytes, &to_copy);
|
to_copy = scatterwalk_next(walk, nbytes);
|
||||||
memcpy(buf, src_addr, to_copy);
|
memcpy(buf, walk->addr, to_copy);
|
||||||
scatterwalk_done_src(walk, src_addr, to_copy);
|
scatterwalk_done_src(walk, to_copy);
|
||||||
buf += to_copy;
|
buf += to_copy;
|
||||||
nbytes -= to_copy;
|
nbytes -= to_copy;
|
||||||
} while (nbytes);
|
} while (nbytes);
|
||||||
|
|
@ -50,12 +49,11 @@ inline void memcpy_to_scatterwalk(struct scatter_walk *walk, const void *buf,
|
||||||
unsigned int nbytes)
|
unsigned int nbytes)
|
||||||
{
|
{
|
||||||
do {
|
do {
|
||||||
void *dst_addr;
|
|
||||||
unsigned int to_copy;
|
unsigned int to_copy;
|
||||||
|
|
||||||
dst_addr = scatterwalk_next(walk, nbytes, &to_copy);
|
to_copy = scatterwalk_next(walk, nbytes);
|
||||||
memcpy(dst_addr, buf, to_copy);
|
memcpy(walk->addr, buf, to_copy);
|
||||||
scatterwalk_done_dst(walk, dst_addr, to_copy);
|
scatterwalk_done_dst(walk, to_copy);
|
||||||
buf += to_copy;
|
buf += to_copy;
|
||||||
nbytes -= to_copy;
|
nbytes -= to_copy;
|
||||||
} while (nbytes);
|
} while (nbytes);
|
||||||
|
|
|
||||||
|
|
@ -41,12 +41,16 @@ static int skcipher_walk_next(struct skcipher_walk *walk);
|
||||||
|
|
||||||
static inline void skcipher_map_src(struct skcipher_walk *walk)
|
static inline void skcipher_map_src(struct skcipher_walk *walk)
|
||||||
{
|
{
|
||||||
walk->src.virt.addr = scatterwalk_map(&walk->in);
|
walk->in.__addr = scatterwalk_map(&walk->in);
|
||||||
|
walk->src.virt.addr = walk->in.addr;
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void skcipher_map_dst(struct skcipher_walk *walk)
|
static inline void skcipher_map_dst(struct skcipher_walk *walk)
|
||||||
{
|
{
|
||||||
walk->dst.virt.addr = scatterwalk_map(&walk->out);
|
walk->out.__addr = scatterwalk_map(&walk->out);
|
||||||
|
walk->dst.virt.addr = walk->out.addr;
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
|
static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
|
||||||
|
|
@ -120,7 +124,7 @@ int skcipher_walk_done(struct skcipher_walk *walk, int res)
|
||||||
goto dst_done;
|
goto dst_done;
|
||||||
}
|
}
|
||||||
|
|
||||||
scatterwalk_done_dst(&walk->out, walk->dst.virt.addr, n);
|
scatterwalk_done_dst(&walk->out, n);
|
||||||
dst_done:
|
dst_done:
|
||||||
|
|
||||||
if (res > 0)
|
if (res > 0)
|
||||||
|
|
|
||||||
|
|
@ -154,17 +154,16 @@ struct nx_sg *nx_walk_and_build(struct nx_sg *nx_dst,
|
||||||
struct scatter_walk walk;
|
struct scatter_walk walk;
|
||||||
struct nx_sg *nx_sg = nx_dst;
|
struct nx_sg *nx_sg = nx_dst;
|
||||||
unsigned int n, len = *src_len;
|
unsigned int n, len = *src_len;
|
||||||
char *dst;
|
|
||||||
|
|
||||||
/* we need to fast forward through @start bytes first */
|
/* we need to fast forward through @start bytes first */
|
||||||
scatterwalk_start_at_pos(&walk, sg_src, start);
|
scatterwalk_start_at_pos(&walk, sg_src, start);
|
||||||
|
|
||||||
while (len && (nx_sg - nx_dst) < sglen) {
|
while (len && (nx_sg - nx_dst) < sglen) {
|
||||||
dst = scatterwalk_next(&walk, len, &n);
|
n = scatterwalk_next(&walk, len);
|
||||||
|
|
||||||
nx_sg = nx_build_sg_list(nx_sg, dst, &n, sglen - (nx_sg - nx_dst));
|
nx_sg = nx_build_sg_list(nx_sg, walk.addr, &n, sglen - (nx_sg - nx_dst));
|
||||||
|
|
||||||
scatterwalk_done_src(&walk, dst, n);
|
scatterwalk_done_src(&walk, n);
|
||||||
len -= n;
|
len -= n;
|
||||||
}
|
}
|
||||||
/* update to_process */
|
/* update to_process */
|
||||||
|
|
|
||||||
|
|
@ -54,6 +54,7 @@ struct rtattr;
|
||||||
struct scatterlist;
|
struct scatterlist;
|
||||||
struct seq_file;
|
struct seq_file;
|
||||||
struct sk_buff;
|
struct sk_buff;
|
||||||
|
union crypto_no_such_thing;
|
||||||
|
|
||||||
struct crypto_instance {
|
struct crypto_instance {
|
||||||
struct crypto_alg alg;
|
struct crypto_alg alg;
|
||||||
|
|
@ -108,6 +109,12 @@ struct crypto_queue {
|
||||||
struct scatter_walk {
|
struct scatter_walk {
|
||||||
struct scatterlist *sg;
|
struct scatterlist *sg;
|
||||||
unsigned int offset;
|
unsigned int offset;
|
||||||
|
union {
|
||||||
|
void *const addr;
|
||||||
|
|
||||||
|
/* Private API field, do not touch. */
|
||||||
|
union crypto_no_such_thing *__addr;
|
||||||
|
};
|
||||||
};
|
};
|
||||||
|
|
||||||
struct crypto_attr_alg {
|
struct crypto_attr_alg {
|
||||||
|
|
|
||||||
|
|
@ -120,18 +120,20 @@ static inline void *scatterwalk_map(struct scatter_walk *walk)
|
||||||
* scatterwalk_next() - Get the next data buffer in a scatterlist walk
|
* scatterwalk_next() - Get the next data buffer in a scatterlist walk
|
||||||
* @walk: the scatter_walk
|
* @walk: the scatter_walk
|
||||||
* @total: the total number of bytes remaining, > 0
|
* @total: the total number of bytes remaining, > 0
|
||||||
* @nbytes_ret: (out) the next number of bytes available, <= @total
|
|
||||||
*
|
*
|
||||||
* Return: A virtual address for the next segment of data from the scatterlist.
|
* A virtual address for the next segment of data from the scatterlist will
|
||||||
* The caller must call scatterwalk_done_src() or scatterwalk_done_dst()
|
* be placed into @walk->addr. The caller must call scatterwalk_done_src()
|
||||||
* when it is done using this virtual address.
|
* or scatterwalk_done_dst() when it is done using this virtual address.
|
||||||
|
*
|
||||||
|
* Returns: the next number of bytes available, <= @total
|
||||||
*/
|
*/
|
||||||
static inline void *scatterwalk_next(struct scatter_walk *walk,
|
static inline unsigned int scatterwalk_next(struct scatter_walk *walk,
|
||||||
unsigned int total,
|
unsigned int total)
|
||||||
unsigned int *nbytes_ret)
|
|
||||||
{
|
{
|
||||||
*nbytes_ret = scatterwalk_clamp(walk, total);
|
unsigned int nbytes = scatterwalk_clamp(walk, total);
|
||||||
return scatterwalk_map(walk);
|
|
||||||
|
walk->__addr = scatterwalk_map(walk);
|
||||||
|
return nbytes;
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void scatterwalk_unmap(const void *vaddr)
|
static inline void scatterwalk_unmap(const void *vaddr)
|
||||||
|
|
@ -149,32 +151,31 @@ static inline void scatterwalk_advance(struct scatter_walk *walk,
|
||||||
/**
|
/**
|
||||||
* scatterwalk_done_src() - Finish one step of a walk of source scatterlist
|
* scatterwalk_done_src() - Finish one step of a walk of source scatterlist
|
||||||
* @walk: the scatter_walk
|
* @walk: the scatter_walk
|
||||||
* @vaddr: the address returned by scatterwalk_next()
|
|
||||||
* @nbytes: the number of bytes processed this step, less than or equal to the
|
* @nbytes: the number of bytes processed this step, less than or equal to the
|
||||||
* number of bytes that scatterwalk_next() returned.
|
* number of bytes that scatterwalk_next() returned.
|
||||||
*
|
*
|
||||||
* Use this if the @vaddr was not written to, i.e. it is source data.
|
* Use this if the mapped address was not written to, i.e. it is source data.
|
||||||
*/
|
*/
|
||||||
static inline void scatterwalk_done_src(struct scatter_walk *walk,
|
static inline void scatterwalk_done_src(struct scatter_walk *walk,
|
||||||
const void *vaddr, unsigned int nbytes)
|
unsigned int nbytes)
|
||||||
{
|
{
|
||||||
scatterwalk_unmap(vaddr);
|
scatterwalk_unmap(walk->addr);
|
||||||
scatterwalk_advance(walk, nbytes);
|
scatterwalk_advance(walk, nbytes);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* scatterwalk_done_dst() - Finish one step of a walk of destination scatterlist
|
* scatterwalk_done_dst() - Finish one step of a walk of destination scatterlist
|
||||||
* @walk: the scatter_walk
|
* @walk: the scatter_walk
|
||||||
* @vaddr: the address returned by scatterwalk_next()
|
|
||||||
* @nbytes: the number of bytes processed this step, less than or equal to the
|
* @nbytes: the number of bytes processed this step, less than or equal to the
|
||||||
* number of bytes that scatterwalk_next() returned.
|
* number of bytes that scatterwalk_next() returned.
|
||||||
*
|
*
|
||||||
* Use this if the @vaddr may have been written to, i.e. it is destination data.
|
* Use this if the mapped address may have been written to, i.e. it is
|
||||||
|
* destination data.
|
||||||
*/
|
*/
|
||||||
static inline void scatterwalk_done_dst(struct scatter_walk *walk,
|
static inline void scatterwalk_done_dst(struct scatter_walk *walk,
|
||||||
void *vaddr, unsigned int nbytes)
|
unsigned int nbytes)
|
||||||
{
|
{
|
||||||
scatterwalk_unmap(vaddr);
|
scatterwalk_unmap(walk->addr);
|
||||||
/*
|
/*
|
||||||
* Explicitly check ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE instead of just
|
* Explicitly check ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE instead of just
|
||||||
* relying on flush_dcache_page() being a no-op when not implemented,
|
* relying on flush_dcache_page() being a no-op when not implemented,
|
||||||
|
|
|
||||||
Loading…
Reference in New Issue