// SPDX-License-Identifier: GPL-2.0 /* Marvell OcteonTX CPT driver * * Copyright (C) 2019 Marvell International Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation.
*/
if (eng_grp->g->engs_num > OTX_CPT_MAX_ENGINES) {
dev_err(dev, "unsupported number of engines %d on octeontx\n",
eng_grp->g->engs_num); return bmap;
}
for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) { if (eng_grp->engs[i].type) {
bitmap_or(bmap.bits, bmap.bits,
eng_grp->engs[i].bmap,
eng_grp->g->engs_num);
bmap.size = eng_grp->g->engs_num;
found = true;
}
}
if (!found)
dev_err(dev, "No engines reserved for engine group %d\n",
eng_grp->idx); return bmap;
}
/* Check whether the engine-type bit @eng_type is set in mask @val. */
static int is_eng_type(int val, int eng_type)
{
	int type_bit = 1 << eng_type;

	return val & type_bit;
}
/*
 * Derive the engine-type mask (SE/AE bits) for a microcode image from its
 * version string and version number.  Returns 0 on success and stores the
 * mask in @ucode_type; returns -EINVAL if no type matches or if the image
 * claims to be both SE and AE microcode at once.
 */
static int get_ucode_type(struct otx_cpt_ucode_hdr *ucode_hdr, int *ucode_type)
{
	char ver[OTX_CPT_UCODE_VER_STR_SZ];
	u32 pos, type_mask = 0;
	u8 nn;

	/* Work on a lower-cased copy of the version string */
	strscpy(ver, ucode_hdr->ver_str, OTX_CPT_UCODE_VER_STR_SZ);
	for (pos = 0; pos < strlen(ver); pos++)
		ver[pos] = tolower(ver[pos]);

	nn = ucode_hdr->ver_num.nn;

	/* "se-" in the version string plus a known SE version number => SE */
	if (strnstr(ver, "se-", OTX_CPT_UCODE_VER_STR_SZ) &&
	    (nn == OTX_CPT_SE_UC_TYPE1 || nn == OTX_CPT_SE_UC_TYPE2 ||
	     nn == OTX_CPT_SE_UC_TYPE3))
		type_mask |= 1 << OTX_CPT_SE_TYPES;

	/* "ae" in the version string plus the AE version number => AE */
	if (strnstr(ver, "ae", OTX_CPT_UCODE_VER_STR_SZ) &&
	    nn == OTX_CPT_AE_UC_TYPE)
		type_mask |= 1 << OTX_CPT_AE_TYPES;

	*ucode_type = type_mask;

	if (!type_mask)
		return -EINVAL;
	/* Microcode must not claim to be both SE and AE at the same time */
	if (is_eng_type(type_mask, OTX_CPT_AE_TYPES) &&
	    is_eng_type(type_mask, OTX_CPT_SE_TYPES))
		return -EINVAL;
	return 0;
}
/* Return 1 if the @size bytes at @ptr are all zero, 0 otherwise. */
static int is_mem_zero(const char *ptr, int size)
{
	int idx;

	for (idx = 0; idx < size; idx++) {
		if (ptr[idx] != 0)
			return 0;
	}

	return 1;
}
bmap = get_cores_bmap(&cpt->pdev->dev, eng_grp); if (!bmap.size) return -EINVAL;
if (eng_grp->mirror.is_ena)
dma_addr =
eng_grp->g->grp[eng_grp->mirror.idx].ucode[0].align_dma; else
dma_addr = eng_grp->ucode[0].align_dma;
/* * Set UCODE_BASE only for the cores which are not used, * other cores should have already valid UCODE_BASE set
*/
for_each_set_bit(i, bmap.bits, bmap.size) if (!eng_grp->g->eng_ref_cnt[i])
writeq((u64) dma_addr, cpt->reg_base +
OTX_CPT_PF_ENGX_UCODE_BASE(i)); return 0;
}
/* Disable the cores only if they are not used anymore */
reg = readq(cpt->reg_base + OTX_CPT_PF_EXE_CTL);
for_each_set_bit(i, bmap.bits, bmap.size) if (!eng_grp->g->eng_ref_cnt[i])
reg &= ~(1ull << i);
writeq(reg, cpt->reg_base + OTX_CPT_PF_EXE_CTL);
/* * If size is less than microcode header size then don't report * an error because it might not be microcode file, just process * next file from archive
*/ if (size < sizeof(struct otx_cpt_ucode_hdr)) return 0;
ucode_hdr = (struct otx_cpt_ucode_hdr *) data; /* * If microcode version can't be found don't report an error * because it might not be microcode file, just process next file
*/ if (get_ucode_type(ucode_hdr, &ucode_type)) return 0;
/* Check for the end of the archive */ if (tar_offs + 2*TAR_BLOCK_LEN > tar_size) {
dev_err(dev, "Invalid tar archive %s\n", tar_filename); goto release_tar_arch;
}
if (is_mem_zero(&tar_arch->fw->data[tar_offs],
2*TAR_BLOCK_LEN)) break;
/* Read next block from tar archive */
tar_blk = (struct tar_blk_t *) &tar_arch->fw->data[tar_offs];
}
pr_debug("Engine groups global info\n");
pr_debug("max SE %d, max AE %d\n",
eng_grps->avail.max_se_cnt, eng_grps->avail.max_ae_cnt);
pr_debug("free SE %d\n", eng_grps->avail.se_cnt);
pr_debug("free AE %d\n", eng_grps->avail.ae_cnt);
for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
grp = &eng_grps->grp[i];
pr_debug("engine_group%d, state %s\n", i,
str_enabled_disabled(grp->is_enabled)); if (grp->is_enabled) {
mirrored_grp = &eng_grps->grp[grp->mirror.idx];
pr_debug("Ucode0 filename %s, version %s\n",
grp->mirror.is_ena ?
mirrored_grp->ucode[0].filename :
grp->ucode[0].filename,
grp->mirror.is_ena ?
mirrored_grp->ucode[0].ver_str :
grp->ucode[0].ver_str);
}
switch (req_eng->type) { case OTX_CPT_SE_TYPES:
avail_cnt = grp->g->avail.se_cnt; break;
case OTX_CPT_AE_TYPES:
avail_cnt = grp->g->avail.ae_cnt; break;
default:
dev_err(dev, "Invalid engine type %d\n", req_eng->type); return -EINVAL;
}
if (avail_cnt < req_eng->count) {
dev_err(dev, "Error available %s engines %d < than requested %d\n",
get_eng_type_str(req_eng->type),
avail_cnt, req_eng->count); return -EBUSY;
}
return 0;
}
/*
 * Reserve the engines requested in @req_engs for engine group @grp.
 * Availability of all requests is validated first so the reservation
 * either happens completely or not at all.
 */
static int reserve_engines(struct device *dev, struct otx_cpt_eng_grp_info *grp,
			   struct otx_cpt_engines *req_engs, int req_cnt)
{
	int idx, rc;

	/* First pass: verify that every request can be satisfied */
	for (idx = 0; idx < req_cnt; idx++) {
		rc = check_engines_availability(dev, grp, &req_engs[idx]);
		if (rc)
			return rc;
	}

	/* Second pass: perform the actual reservations */
	for (idx = 0; idx < req_cnt; idx++) {
		rc = do_reserve_engines(dev, grp, &req_engs[idx]);
		if (rc)
			return rc;
	}

	return 0;
}
/*
 * Reduce the requested engine counts by the number of engines that can be
 * shared from the mirrored engine group.
 */
static void update_requested_engs(struct otx_cpt_eng_grp_info *mirrored_eng_grp,
				  struct otx_cpt_engines *engs, int engs_cnt)
{
	struct otx_cpt_engs_rsvd *shared_engs;
	int idx;

	for (idx = 0; idx < engs_cnt; idx++) {
		shared_engs = find_engines_by_type(mirrored_eng_grp,
						   engs[idx].type);
		if (!shared_engs)
			continue;

		/*
		 * If mirrored group has this type of engines attached then
		 * there are 3 scenarios possible:
		 * 1) shared_engs->count == engs[idx].count then all engines
		 * from mirrored engine group will be shared with this engine
		 * group
		 * 2) shared_engs->count > engs[idx].count then only a subset
		 * of engines from mirrored engine group will be shared with
		 * this engine group
		 * 3) shared_engs->count < engs[idx].count then all engines
		 * from mirrored engine group will be shared with this group
		 * and additional engines will be reserved for exclusively use
		 * by this engine group
		 */
		engs[idx].count -= shared_engs->count;
	}
}
for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) { if (!eng_grps->grp[i].is_enabled) continue; if (eng_grps->grp[i].ucode[0].type) continue; if (grp->idx == i) continue; if (!strncasecmp(eng_grps->grp[i].ucode[0].ver_str,
grp->ucode[0].ver_str,
OTX_CPT_UCODE_VER_STR_SZ)) return &eng_grps->grp[i];
}
return NULL;
}
staticstruct otx_cpt_eng_grp_info *find_unused_eng_grp( struct otx_cpt_eng_grps *eng_grps)
{ int i;
for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) { if (!eng_grps->grp[i].is_enabled) return &eng_grps->grp[i];
} return NULL;
}
staticint eng_grp_update_masks(struct device *dev, struct otx_cpt_eng_grp_info *eng_grp)
{ struct otx_cpt_engs_rsvd *engs, *mirrored_engs; struct otx_cpt_bitmap tmp_bmap = { {0} }; int i, j, cnt, max_cnt; int bit;
for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
engs = &eng_grp->engs[i]; if (!engs->type) continue; if (engs->count <= 0) continue;
switch (engs->type) { case OTX_CPT_SE_TYPES:
max_cnt = eng_grp->g->avail.max_se_cnt; break;
case OTX_CPT_AE_TYPES:
max_cnt = eng_grp->g->avail.max_ae_cnt; break;
default:
dev_err(dev, "Invalid engine type %d\n", engs->type); return -EINVAL;
}
staticint delete_engine_group(struct device *dev, struct otx_cpt_eng_grp_info *eng_grp)
{ int i, ret;
if (!eng_grp->is_enabled) return -EINVAL;
if (eng_grp->mirror.ref_count) {
dev_err(dev, "Can't delete engine_group%d as it is used by engine_group(s):",
eng_grp->idx); for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) { if (eng_grp->g->grp[i].mirror.is_ena &&
eng_grp->g->grp[i].mirror.idx == eng_grp->idx)
pr_cont(" %d", i);
}
pr_cont("\n"); return -EINVAL;
}
/* Removing engine group mirroring if enabled */
remove_eng_grp_mirroring(eng_grp);
/* Disable engine group */
ret = disable_eng_grp(dev, eng_grp, eng_grp->g->obj); if (ret) return ret;
/* Release all engines held by this engine group */
ret = release_engines(dev, eng_grp); if (ret) return ret;
/*
 * Verify that the single loaded microcode image supports every engine type
 * requested for this engine group.  Returns 0 on success, -EINVAL otherwise.
 */
static int validate_1_ucode_scenario(struct device *dev,
				     struct otx_cpt_eng_grp_info *eng_grp,
				     struct otx_cpt_engines *engs, int engs_cnt)
{
	int idx;

	for (idx = 0; idx < engs_cnt; idx++) {
		if (otx_cpt_uc_supports_eng_type(&eng_grp->ucode[0],
						 engs[idx].type))
			continue;

		dev_err(dev, "Microcode %s does not support %s engines\n",
			eng_grp->ucode[0].filename,
			get_eng_type_str(engs[idx].type));
		return -EINVAL;
	}

	return 0;
}
staticint create_engine_group(struct device *dev, struct otx_cpt_eng_grps *eng_grps, struct otx_cpt_engines *engs, int engs_cnt, void *ucode_data[], int ucodes_cnt, bool use_uc_from_tar_arch)
{ struct otx_cpt_eng_grp_info *mirrored_eng_grp; struct tar_ucode_info_t *tar_info; struct otx_cpt_eng_grp_info *eng_grp; int i, ret = 0;
if (ucodes_cnt > OTX_CPT_MAX_ETYPES_PER_GRP) return -EINVAL;
/* Validate if requested engine types are supported by this device */ for (i = 0; i < engs_cnt; i++) if (!dev_supports_eng_type(eng_grps, engs[i].type)) {
dev_err(dev, "Device does not support %s engines\n",
get_eng_type_str(engs[i].type)); return -EPERM;
}
/* Find engine group which is not used */
eng_grp = find_unused_eng_grp(eng_grps); if (!eng_grp) {
dev_err(dev, "Error all engine groups are being used\n"); return -ENOSPC;
}
/* Load ucode */ for (i = 0; i < ucodes_cnt; i++) { if (use_uc_from_tar_arch) {
tar_info = (struct tar_ucode_info_t *) ucode_data[i];
eng_grp->ucode[i] = tar_info->ucode;
ret = copy_ucode_to_dma_mem(dev, &eng_grp->ucode[i],
tar_info->ucode_ptr);
} else
ret = ucode_load(dev, &eng_grp->ucode[i],
(char *) ucode_data[i]); if (ret) goto err_ucode_unload;
}
/* Validate scenario where 1 ucode is used */
ret = validate_1_ucode_scenario(dev, eng_grp, engs, engs_cnt); if (ret) goto err_ucode_unload;
/* Check if this group mirrors another existing engine group */
mirrored_eng_grp = find_mirrored_eng_grp(eng_grp); if (mirrored_eng_grp) { /* Setup mirroring */
setup_eng_grp_mirroring(eng_grp, mirrored_eng_grp);
/* * Update count of requested engines because some * of them might be shared with mirrored group
*/
update_requested_engs(mirrored_eng_grp, engs, engs_cnt);
}
/* Reserve engines */
ret = reserve_engines(dev, eng_grp, engs, engs_cnt); if (ret) goto err_ucode_unload;
/* Update ucode pointers used by engines */
update_ucode_ptrs(eng_grp);
/* Update engine masks used by this group */
ret = eng_grp_update_masks(dev, eng_grp); if (ret) goto err_release_engs;
/* Create sysfs entry for engine group info */
ret = create_sysfs_eng_grps_info(dev, eng_grp); if (ret) goto err_release_engs;
/* Enable engine group */
ret = enable_eng_grp(eng_grp, eng_grps->obj); if (ret) goto err_release_engs;
/* * If this engine group mirrors another engine group * then we need to unload ucode as we will use ucode * from mirrored engine group
*/ if (eng_grp->mirror.is_ena)
ucode_unload(dev, &eng_grp->ucode[0]);
eng_grp->is_enabled = true; if (eng_grp->mirror.is_ena)
dev_info(dev, "Engine_group%d: reuse microcode %s from group %d\n",
eng_grp->idx, mirrored_eng_grp->ucode[0].ver_str,
mirrored_eng_grp->idx); else
dev_info(dev, "Engine_group%d: microcode loaded %s\n",
eng_grp->idx, eng_grp->ucode[0].ver_str);
for (;;) {
val = strsep(&start, ";"); if (!val) break;
val = strim(val); if (!*val) continue;
if (!strncasecmp(val, "engine_group", 12)) { if (del_grp_idx != -1) goto err_print;
tmp = strim(strsep(&val, ":")); if (!val) goto err_print; if (strlen(tmp) != 13) goto err_print; if (kstrtoint((tmp + 12), 10, &del_grp_idx)) goto err_print;
val = strim(val); if (strncasecmp(val, "null", 4)) goto err_print; if (strlen(val) != 4) goto err_print;
} elseif (!strncasecmp(val, "se", 2) && strchr(val, ':')) { if (has_se || ucode_idx) goto err_print;
tmp = strim(strsep(&val, ":")); if (!val) goto err_print; if (strlen(tmp) != 2) goto err_print; if (kstrtoint(strim(val), 10, &engs[grp_idx].count)) goto err_print;
engs[grp_idx++].type = OTX_CPT_SE_TYPES;
has_se = true;
} elseif (!strncasecmp(val, "ae", 2) && strchr(val, ':')) { if (has_ae || ucode_idx) goto err_print;
tmp = strim(strsep(&val, ":")); if (!val) goto err_print; if (strlen(tmp) != 2) goto err_print; if (kstrtoint(strim(val), 10, &engs[grp_idx].count)) goto err_print;
engs[grp_idx++].type = OTX_CPT_AE_TYPES;
has_ae = true;
} else { if (ucode_idx > 1) goto err_print; if (!strlen(val)) goto err_print; if (strnstr(val, " ", strlen(val))) goto err_print;
ucode_filename[ucode_idx++] = val;
}
}
/* Validate input parameters */ if (del_grp_idx == -1) { if (!(grp_idx && ucode_idx)) goto err_print;
if (ucode_idx > 1 && grp_idx < 2) goto err_print;
if (grp_idx > OTX_CPT_MAX_ETYPES_PER_GRP) {
err_msg = "Error max 2 engine types can be attached"; goto err_print;
}
} else { if (del_grp_idx < 0 ||
del_grp_idx >= OTX_CPT_MAX_ENGINE_GROUPS) {
dev_err(dev, "Invalid engine group index %d\n",
del_grp_idx);
ret = -EINVAL; return ret;
}
if (!eng_grps->grp[del_grp_idx].is_enabled) {
dev_err(dev, "Error engine_group%d is not configured\n",
del_grp_idx);
ret = -EINVAL; return ret;
}
if (grp_idx || ucode_idx) goto err_print;
}
mutex_lock(&eng_grps->lock);
if (eng_grps->is_rdonly) {
dev_err(dev, "Disable VFs before modifying engine groups\n");
ret = -EACCES; goto err_unlock;
}
if (del_grp_idx == -1) /* create engine group */
ret = create_engine_group(dev, eng_grps, engs, grp_idx,
(void **) ucode_filename,
ucode_idx, false); else /* delete engine group */
ret = delete_engine_group(dev, &eng_grps->grp[del_grp_idx]); if (ret) goto err_unlock;
print_dbg_info(dev, eng_grps);
err_unlock:
mutex_unlock(&eng_grps->lock); return ret ? ret : count;
err_print:
dev_err(dev, "%s\n", err_msg);
return ret;
}
int otx_cpt_try_create_default_eng_grps(struct pci_dev *pdev, struct otx_cpt_eng_grps *eng_grps, int pf_type)
{ struct tar_ucode_info_t *tar_info[OTX_CPT_MAX_ETYPES_PER_GRP] = {}; struct otx_cpt_engines engs[OTX_CPT_MAX_ETYPES_PER_GRP] = {}; struct tar_arch_info_t *tar_arch = NULL; char *tar_filename; int i, ret = 0;
mutex_lock(&eng_grps->lock);
/* * We don't create engine group for kernel crypto if attempt to create * it was already made (when user enabled VFs for the first time)
*/ if (eng_grps->is_first_try) goto unlock_mutex;
eng_grps->is_first_try = true;
/* We create group for kcrypto only if no groups are configured */ for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) if (eng_grps->grp[i].is_enabled) goto unlock_mutex;
switch (pf_type) { case OTX_CPT_AE: case OTX_CPT_SE:
tar_filename = OTX_CPT_UCODE_TAR_FILE_NAME; break;
default:
dev_err(&pdev->dev, "Unknown PF type %d\n", pf_type);
ret = -EINVAL; goto unlock_mutex;
}
tar_arch = load_tar_archive(&pdev->dev, tar_filename); if (!tar_arch) goto unlock_mutex;
/* * If device supports SE engines and there is SE microcode in tar * archive try to create engine group with SE engines for kernel * crypto functionality (symmetric crypto)
*/
tar_info[0] = get_uc_from_tar_archive(tar_arch, OTX_CPT_SE_TYPES); if (tar_info[0] &&
dev_supports_eng_type(eng_grps, OTX_CPT_SE_TYPES)) {
ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
(void **) tar_info, 1, true); if (ret) goto release_tar_arch;
} /* * If device supports AE engines and there is AE microcode in tar * archive try to create engine group with AE engines for asymmetric * crypto functionality.
*/
tar_info[0] = get_uc_from_tar_archive(tar_arch, OTX_CPT_AE_TYPES); if (tar_info[0] &&
dev_supports_eng_type(eng_grps, OTX_CPT_AE_TYPES)) {
/* Disengage the cores from groups */ for (grp = 0; grp < OTX_CPT_MAX_ENGINE_GROUPS; grp++) {
writeq(0, cpt->reg_base + OTX_CPT_PF_GX_EN(grp));
udelay(CSR_DELAY);
}
reg = readq(cpt->reg_base + OTX_CPT_PF_EXEC_BUSY); while (reg) {
udelay(CSR_DELAY);
reg = readq(cpt->reg_base + OTX_CPT_PF_EXEC_BUSY); if (timeout--) {
dev_warn(&cpt->pdev->dev, "Cores still busy\n"); break;
}
}
/* Disable the cores */
writeq(0, cpt->reg_base + OTX_CPT_PF_EXE_CTL);
}
/*
 * Tear down all engine groups: remove the ucode_load sysfs attribute,
 * delete every engine group (mirroring groups first, since they reference
 * the groups they mirror), and free the per-group engine bitmaps.
 */
void otx_cpt_cleanup_eng_grps(struct pci_dev *pdev,
			      struct otx_cpt_eng_grps *eng_grps)
{
	struct otx_cpt_eng_grp_info *grp;
	int grp_idx, etype;

	mutex_lock(&eng_grps->lock);

	/* Drop the ucode_load sysfs attribute if it was created */
	if (eng_grps->is_ucode_load_created) {
		device_remove_file(&pdev->dev, &eng_grps->ucode_load_attr);
		eng_grps->is_ucode_load_created = false;
	}

	/* First delete all mirroring engine groups */
	for (grp_idx = 0; grp_idx < OTX_CPT_MAX_ENGINE_GROUPS; grp_idx++) {
		if (eng_grps->grp[grp_idx].mirror.is_ena)
			delete_engine_group(&pdev->dev,
					    &eng_grps->grp[grp_idx]);
	}

	/* Delete remaining engine groups */
	for (grp_idx = 0; grp_idx < OTX_CPT_MAX_ENGINE_GROUPS; grp_idx++)
		delete_engine_group(&pdev->dev, &eng_grps->grp[grp_idx]);

	/* Release the per-engine-type bitmap memory */
	for (grp_idx = 0; grp_idx < OTX_CPT_MAX_ENGINE_GROUPS; grp_idx++) {
		grp = &eng_grps->grp[grp_idx];
		for (etype = 0; etype < OTX_CPT_MAX_ETYPES_PER_GRP; etype++) {
			kfree(grp->engs[etype].bmap);
			grp->engs[etype].bmap = NULL;
		}
	}

	mutex_unlock(&eng_grps->lock);
}
int otx_cpt_init_eng_grps(struct pci_dev *pdev, struct otx_cpt_eng_grps *eng_grps, int pf_type)
{ struct otx_cpt_eng_grp_info *grp; int i, j, ret = 0;
eng_grps->engs_num = eng_grps->avail.max_se_cnt +
eng_grps->avail.max_ae_cnt; if (eng_grps->engs_num > OTX_CPT_MAX_ENGINES) {
dev_err(&pdev->dev, "Number of engines %d > than max supported %d\n",
eng_grps->engs_num, OTX_CPT_MAX_ENGINES);
ret = -EINVAL; goto err;
}
for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
grp = &eng_grps->grp[i];
grp->g = eng_grps;
grp->idx = i;
snprintf(grp->sysfs_info_name, OTX_CPT_UCODE_NAME_LENGTH, "engine_group%d", i); for (j = 0; j < OTX_CPT_MAX_ETYPES_PER_GRP; j++) {
grp->engs[j].bmap =
kcalloc(BITS_TO_LONGS(eng_grps->engs_num), sizeof(long), GFP_KERNEL); if (!grp->engs[j].bmap) {
ret = -ENOMEM; goto err;
}
}
}
switch (pf_type) { case OTX_CPT_SE: /* OcteonTX 83XX SE CPT PF has only SE engines attached */
eng_grps->eng_types_supported = 1 << OTX_CPT_SE_TYPES; break;
case OTX_CPT_AE: /* OcteonTX 83XX AE CPT PF has only AE engines attached */
eng_grps->eng_types_supported = 1 << OTX_CPT_AE_TYPES; break;
default:
dev_err(&pdev->dev, "Unknown PF type %d\n", pf_type);
ret = -EINVAL; goto err;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.