while (true) {
state = atomic_cmpxchg(&pf->ipsec.cpt_state,
CN10K_CPT_HW_AVAILABLE,
CN10K_CPT_HW_IN_USE); if (state == CN10K_CPT_HW_AVAILABLE) returntrue; if (state == CN10K_CPT_HW_UNAVAILABLE) returnfalse;
/* Wait for instruction queue to become empty. * CPT_LF_INPROG.INFLIGHT count is zero
*/ do {
reg_val = otx2_read64(pf, CN10K_CPT_LF_INPROG);
inflight = FIELD_GET(CPT_LF_INPROG_INFLIGHT, reg_val); if (!inflight) break;
usleep_range(10000, 20000); if (timeout-- < 0) {
netdev_err(pf->netdev, "Timeout to cleanup CPT IQ\n"); break;
}
} while (1);
/* Disable executions in the LF's queue, * the queue should be empty at this point
*/
reg_val &= ~BIT_ULL(16);
otx2_write64(pf, CN10K_CPT_LF_INPROG, reg_val);
/* Wait for instruction queue to become empty */
cnt = 0; do {
reg_val = otx2_read64(pf, CN10K_CPT_LF_INPROG); if (reg_val & BIT_ULL(31))
cnt = 0; else
cnt++;
reg_val = otx2_read64(pf, CN10K_CPT_LF_Q_GRP_PTR);
nq_ptr = FIELD_GET(CPT_LF_Q_GRP_PTR_DQ_PTR, reg_val);
dq_ptr = FIELD_GET(CPT_LF_Q_GRP_PTR_DQ_PTR, reg_val);
} while ((cnt < 10) && (nq_ptr != dq_ptr));
/* Set IQ base address and size to 0 */
otx2_write64(pf, CN10K_CPT_LF_Q_BASE, 0);
otx2_write64(pf, CN10K_CPT_LF_Q_SIZE, 0);
/* Free CPTLF IQ */
cn10k_outb_cptlf_iq_free(pf);
/* Free and detach CPT LF */
cn10k_outb_cptlf_free(pf);
ret = cn10k_outb_cptlf_detach(pf); if (ret)
netdev_err(pf->netdev, "Failed to detach CPT LF\n");
lmt_info = per_cpu_ptr(pf->hw.lmt_info, smp_processor_id()); /* FIXME: val[0:10] LMT_ID. * [12:15] no of LMTST - 1 in the burst. * [19:63] data size of each LMTST in the burst except first.
*/
val = (lmt_info->lmt_id & 0x7FF); /* Target address for LMTST flush tells HW how many 128bit * words are present. * tar_addr[6:4] size of first LMTST - 1 in units of 128b.
*/
tar_addr |= pf->ipsec.io_addr | (((size / 16) - 1) & 0x7) << 4;
dma_wmb();
memcpy((u64 *)lmt_info->lmt_addr, inst, size);
cn10k_lmt_flush(val, tar_addr);
}
/* Check if CPT-LF available */ if (!cn10k_cpt_device_set_inuse(pf)) {
ret = -ENODEV; goto free_mem;
}
cn10k_cpt_inst_flush(pf, &inst, sizeof(struct cpt_inst_s));
dma_wmb();
ret = cn10k_wait_for_cpt_respose(pf, res); if (ret) goto set_available;
/* Trigger CTX flush to write dirty data back to DRAM */
reg_val = FIELD_PREP(CPT_LF_CTX_FLUSH_CPTR, sa_iova >> 7);
otx2_write64(pf, CN10K_CPT_LF_CTX_FLUSH, reg_val);
/* Write SA context data to memory before enabling */
wmb();
/* Enable SA */
sa_entry->sa_valid = 1;
}
/* cn10k_ipsec_validate_state() - check whether an xfrm state is offloadable.
 * @x:      xfrm state the stack wants to offload
 * @extack: netlink extended ack used to report the rejection reason
 *
 * CN10K CPT hardware supports only crypto-mode offload of ESP states
 * using AES-GCM with a 16-byte ICV (rfc4106 "seqiv" geniv), with a
 * 128/192/256-bit key plus the 4-byte salt, no separate auth or
 * compression algorithm, no ESN, no encapsulation and no TFC padding.
 *
 * Return: 0 if the state may be offloaded, -EINVAL otherwise (with the
 * reason recorded in @extack).
 */
static int cn10k_ipsec_validate_state(struct xfrm_state *x,
				      struct netlink_ext_ack *extack)
{
	if (x->props.aalgo != SADB_AALG_NONE) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Cannot offload authenticated xfrm states");
		return -EINVAL;
	}
	if (x->props.ealgo != SADB_X_EALG_AES_GCM_ICV16) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only AES-GCM-ICV16 xfrm state may be offloaded");
		return -EINVAL;
	}
	if (x->props.calgo != SADB_X_CALG_NONE) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Cannot offload compressed xfrm states");
		return -EINVAL;
	}
	if (x->props.flags & XFRM_STATE_ESN) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot offload ESN xfrm states");
		return -EINVAL;
	}
	if (x->props.family != AF_INET && x->props.family != AF_INET6) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only IPv4/v6 xfrm states may be offloaded");
		return -EINVAL;
	}
	if (x->xso.type != XFRM_DEV_OFFLOAD_CRYPTO) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Cannot offload other than crypto-mode");
		return -EINVAL;
	}
	if (x->props.mode != XFRM_MODE_TRANSPORT &&
	    x->props.mode != XFRM_MODE_TUNNEL) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only tunnel/transport xfrm states may be offloaded");
		return -EINVAL;
	}
	if (x->id.proto != IPPROTO_ESP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only ESP xfrm state may be offloaded");
		return -EINVAL;
	}
	if (x->encap) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Encapsulated xfrm state may not be offloaded");
		return -EINVAL;
	}
	if (!x->aead) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Cannot offload xfrm states without aead");
		return -EINVAL;
	}
	if (x->aead->alg_icv_len != 128) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Cannot offload xfrm states with AEAD ICV length other than 128bit");
		return -EINVAL;
	}
	/* Key lengths include the 32-bit salt appended to the AES key
	 * for rfc4106 (GCM) nonce construction.
	 */
	if (x->aead->alg_key_len != 128 + 32 &&
	    x->aead->alg_key_len != 192 + 32 &&
	    x->aead->alg_key_len != 256 + 32) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Cannot offload xfrm states with AEAD key length other than 128/192/256bit");
		return -EINVAL;
	}
	if (x->tfcpad) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Cannot offload xfrm states with tfc padding");
		return -EINVAL;
	}
	if (!x->geniv) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Cannot offload xfrm states without geniv");
		return -EINVAL;
	}
	if (strcmp(x->geniv, "seqiv")) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Cannot offload xfrm states with geniv other than seqiv");
		return -EINVAL;
	}
	return 0;
}
x->xso.offload_handle = (unsignedlong)sa_info; /* Enable static branch when first SA setup */ if (!pf->ipsec.outb_sa_count)
static_branch_enable(&cn10k_ipsec_sa_enabled);
pf->ipsec.outb_sa_count++; return 0;
}
/* If no more SA's then update netdev feature for potential change * in NETIF_F_HW_ESP.
*/ if (!--pf->ipsec.outb_sa_count)
queue_work(pf->ipsec.sa_workq, &pf->ipsec.sa_work);
}
/* Disable static branch when no more SA enabled */
static_branch_disable(&cn10k_ipsec_sa_enabled);
rtnl_lock();
netdev_update_features(pf->netdev);
rtnl_unlock();
}
/* Save DMA mapping info for later unmapping */
sq->sg[sq->head].dma_addr[seg] = dma_addr;
sq->sg[sq->head].size[seg] = len;
sq->sg[sq->head].num_segs++;
/* Check for IPSEC offload enabled */ if (!(pf->flags & OTX2_FLAG_IPSEC_OFFLOAD_ENABLED)) goto drop;
sp = skb_sec_path(skb); if (unlikely(!sp->len)) goto drop;
x = xfrm_input_state(skb); if (unlikely(!x)) goto drop;
if (x->props.mode != XFRM_MODE_TRANSPORT &&
x->props.mode != XFRM_MODE_TUNNEL) goto drop;
dlen = cn10k_ipsec_get_ip_data_len(x, skb); if (dlen == 0 && netif_msg_tx_err(pf)) {
netdev_err(pf->netdev, "Invalid IP header, ip-length zero\n"); goto drop;
}
/* Check for valid SA context */
sa_info = (struct qmem *)x->xso.offload_handle; if (!sa_info) goto drop;
memset(&inst, 0, sizeof(struct cpt_inst_s));
/* Get authentication offset */ if (x->props.family == AF_INET)
auth_offset = sizeof(struct iphdr); else
auth_offset = sizeof(struct ipv6hdr);
/* IV offset is after ESP header */
iv_offset = auth_offset + sizeof(struct ip_esp_hdr); /* Encap will start after IV */
encap_offset = iv_offset + GCM_RFC4106_IV_SIZE;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.