/* NOTE(review): fragment of the transmit-path AEAD encrypt routine; the
 * enclosing function header is outside this chunk, so the roles of skb,
 * ks and peer are inferred from the visible calls — confirm against the
 * full file.
 */
/* check that there's enough headroom in the skb for packet
 * encapsulation
 */
if (unlikely(skb_cow_head(skb, OVPN_HEAD_ROOM))) return -ENOBUFS;

/* get number of skb frags and ensure that packet data is writable */
nfrags = skb_cow_data(skb, 0, &trailer);
if (unlikely(nfrags < 0)) return nfrags;

/* the sg list below is sized for one entry per frag plus two extra
 * slots (head/tail), so reject packets that would overflow it
 */
if (unlikely(nfrags + 2 > (MAX_SKB_FRAGS + 2))) return -ENOSPC;

/* sg may be required by async crypto, so it must live past this
 * function's return — hence the heap allocation in the skb cb
 */
ovpn_skb_cb(skb)->sg = kmalloc(sizeof(*ovpn_skb_cb(skb)->sg) *
(nfrags + 2), GFP_ATOMIC);
if (unlikely(!ovpn_skb_cb(skb)->sg)) return -ENOMEM;

/* obtain packet ID, which is used both as a first
 * 4 bytes of nonce and last 4 bytes of associated data.
 */
ret = ovpn_pktid_xmit_next(&ks->pid_xmit, &pktid);
if (unlikely(ret < 0)) return ret;

/* iv may be required by async crypto, same lifetime argument as sg.
 * NOTE(review): on this and later error paths the earlier cb
 * allocations are not freed here — presumably the caller releases
 * ovpn_skb_cb() resources on error; verify against the full file.
 */
ovpn_skb_cb(skb)->iv = kmalloc(OVPN_NONCE_SIZE, GFP_ATOMIC);
if (unlikely(!ovpn_skb_cb(skb)->iv)) return -ENOMEM;
iv = ovpn_skb_cb(skb)->iv;

/* concat 4 bytes packet id and 8 bytes nonce tail into 12 bytes
 * nonce
 */
ovpn_pktid_aead_write(pktid, ks->nonce_tail_xmit, iv);

/* make space for packet id and push it to the front; only the wire
 * portion of the nonce (the packet id) is transmitted
 */
__skb_push(skb, OVPN_NONCE_WIRE_SIZE);
memcpy(skb->data, iv, OVPN_NONCE_WIRE_SIZE);

/* add packet op as head of additional data */
op = ovpn_opcode_compose(OVPN_DATA_V2, ks->key_id, peer->id);
__skb_push(skb, OVPN_OPCODE_SIZE);
BUILD_BUG_ON(sizeof(op) != OVPN_OPCODE_SIZE);
*((__force __be32 *)skb->data) = htonl(op);

/* AEAD Additional data: opcode + wire nonce at the head of the skb */
sg_set_buf(sg, skb->data, OVPN_AAD_SIZE);

/* request is freed later (async completion); GFP_ATOMIC because we may
 * be in softirq context
 */
req = aead_request_alloc(ks->encrypt, GFP_ATOMIC);
if (unlikely(!req)) return -ENOMEM;
/* NOTE(review): fragment of the receive-path AEAD decrypt routine; the
 * enclosing function header (and the computation of payload_len /
 * payload_offset) is outside this chunk — confirm against the full
 * file.
 */
/* sanity check on packet size, payload size must be >= 0 */
if (unlikely(payload_len < 0)) return -EINVAL;

/* Prepare the skb data buffer to be accessed up until the auth tag.
 * This is required because this area is directly mapped into the sg
 * list.
 */
if (unlikely(!pskb_may_pull(skb, payload_offset))) return -ENODATA;

/* get number of skb frags and ensure that packet data is writable */
nfrags = skb_cow_data(skb, 0, &trailer);
if (unlikely(nfrags < 0)) return nfrags;

/* sg list is sized for one entry per frag plus two extra slots */
if (unlikely(nfrags + 2 > (MAX_SKB_FRAGS + 2))) return -ENOSPC;

/* sg may be required by async crypto, so it must outlive this call */
ovpn_skb_cb(skb)->sg = kmalloc(sizeof(*ovpn_skb_cb(skb)->sg) *
(nfrags + 2), GFP_ATOMIC);
if (unlikely(!ovpn_skb_cb(skb)->sg)) return -ENOMEM;

/* iv may be required by async crypto.
 * NOTE(review): the sg buffer above is not freed on this error path —
 * presumably the caller releases ovpn_skb_cb() resources; verify.
 */
ovpn_skb_cb(skb)->iv = kmalloc(OVPN_NONCE_SIZE, GFP_ATOMIC);
if (unlikely(!ovpn_skb_cb(skb)->iv)) return -ENOMEM;
iv = ovpn_skb_cb(skb)->iv;

/* copy nonce into IV buffer: the wire nonce (packet id) comes from the
 * packet just after the opcode, the remaining tail bytes from the
 * locally stored per-key nonce tail
 */
memcpy(iv, skb->data + OVPN_OPCODE_SIZE, OVPN_NONCE_WIRE_SIZE);
memcpy(iv + OVPN_NONCE_WIRE_SIZE, ks->nonce_tail_recv,
OVPN_NONCE_TAIL_SIZE);

req = aead_request_alloc(ks->decrypt, GFP_ATOMIC);
if (unlikely(!req)) return -ENOMEM;
/* NOTE(review): fragment of a cipher initialization helper; the
 * allocation of 'aead' and the 'error' cleanup label are outside this
 * chunk — confirm against the full file.
 */
ret = crypto_aead_setkey(aead, key, keylen);
if (ret) {
	pr_err("%s crypto_aead_setkey size=%u failed, err=%d\n", title,
	keylen, ret);
	goto error;
}

/* fix the authentication tag length produced/verified by the cipher */
ret = crypto_aead_setauthsize(aead, OVPN_AUTH_TAG_SIZE);
if (ret) {
	pr_err("%s crypto_aead_setauthsize failed, err=%d\n", title,
	ret);
	goto error;
}

/* basic AEAD assumption: the data path builds a fixed-size nonce, so
 * reject any cipher whose IV size does not match
 */
if (crypto_aead_ivsize(aead) != OVPN_NONCE_SIZE) {
	pr_err("%s IV size must be %d\n", title, OVPN_NONCE_SIZE);
	ret = -EINVAL;
	goto error;
}
Die Informationen auf dieser Webseite wurden nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.