/*
 * Interval to flush dirty data for the next CTX entry. The interval is
 * measured in increments of 10ns (interval time = CTX_FLUSH_TIMER_CNT * 10ns).
 */
#define CTX_FLUSH_TIMER_CNT 0x2FAF0
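/*
 * For reference: 0x2FAF0 = 195312, so the programmed flush interval is
 * 195312 * 10ns, i.e. roughly 1.95ms.
 */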
if (eng_grp->g->engs_num < 0 ||
eng_grp->g->engs_num > OTX2_CPT_MAX_ENGINES) {
dev_err(dev, "unsupported number of engines %d on octeontx2\n",
			eng_grp->g->engs_num);
		return bmap;
}
	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
		if (eng_grp->engs[i].type) {
bitmap_or(bmap.bits, bmap.bits,
eng_grp->engs[i].bmap,
eng_grp->g->engs_num);
bmap.size = eng_grp->g->engs_num;
found = true;
}
}
if (!found)
dev_err(dev, "No engines reserved for engine group %d\n",
			eng_grp->idx);

	return bmap;
}
static int is_eng_type(int val, int eng_type)
{
	return val & (1 << eng_type);
}
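/* E.g. is_eng_type(val, OTX2_CPT_SE_TYPES) is nonzero iff the SE bit is set in val. */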
static int get_ucode_type(struct device *dev,
			  struct otx2_cpt_ucode_hdr *ucode_hdr,
			  int *ucode_type, u16 rid)
{
	char ver_str_prefix[OTX2_CPT_UCODE_VER_STR_SZ];
	char tmp_ver_str[OTX2_CPT_UCODE_VER_STR_SZ];
	int i, val = 0;
u8 nn;
	strscpy(tmp_ver_str, ucode_hdr->ver_str, OTX2_CPT_UCODE_VER_STR_SZ);
	for (i = 0; i < strlen(tmp_ver_str); i++)
tmp_ver_str[i] = tolower(tmp_ver_str[i]);
	sprintf(ver_str_prefix, "ocpt-%02d", rid);
	if (!strnstr(tmp_ver_str, ver_str_prefix, OTX2_CPT_UCODE_VER_STR_SZ))
		return -EINVAL;

	nn = ucode_hdr->ver_num.nn;
	if (strnstr(tmp_ver_str, "se-", OTX2_CPT_UCODE_VER_STR_SZ) &&
(nn == OTX2_CPT_SE_UC_TYPE1 || nn == OTX2_CPT_SE_UC_TYPE2 ||
nn == OTX2_CPT_SE_UC_TYPE3))
		val |= 1 << OTX2_CPT_SE_TYPES;
	if (strnstr(tmp_ver_str, "ie-", OTX2_CPT_UCODE_VER_STR_SZ) &&
(nn == OTX2_CPT_IE_UC_TYPE1 || nn == OTX2_CPT_IE_UC_TYPE2 ||
nn == OTX2_CPT_IE_UC_TYPE3))
		val |= 1 << OTX2_CPT_IE_TYPES;
	if (strnstr(tmp_ver_str, "ae", OTX2_CPT_UCODE_VER_STR_SZ) &&
nn == OTX2_CPT_AE_UC_TYPE)
val |= 1 << OTX2_CPT_AE_TYPES;
*ucode_type = val;
	if (!val)
		return -EINVAL;
return 0;
}
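/*
 * Illustrative example (version-string layout assumed from the checks above):
 * for rid 1, a header whose ver_str contains "ocpt-01" and "se-" and whose
 * ver_num.nn is one of the OTX2_CPT_SE_UC_TYPE* values yields
 * *ucode_type = 1 << OTX2_CPT_SE_TYPES.
 */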
static int __write_ucode_base(struct otx2_cptpf_dev *cptpf, int eng,
dma_addr_t dma_addr, int blkaddr)
{
	return otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
CPT_AF_EXEX_UCODE_BASE(eng),
(u64)dma_addr, blkaddr);
}
static int cptx_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp,
			       struct otx2_cptpf_dev *cptpf, int blkaddr)
{
	struct otx2_cpt_engs_rsvd *engs;
	dma_addr_t dma_addr;
	int i, bit, ret;
/* Set PF number for microcode fetches */
ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
CPT_AF_PF_FUNC,
rvu_make_pcifunc(cptpf->pdev,
cptpf->pf_id, 0),
				    blkaddr);
	if (ret)
		return ret;

for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
		engs = &eng_grp->engs[i];
		if (!engs->type)
			continue;

dma_addr = engs->ucode->dma;
		/*
		 * Set UCODE_BASE only for the cores which are not used;
		 * other cores should already have a valid UCODE_BASE set.
		 */
		for_each_set_bit(bit, engs->bmap, eng_grp->g->engs_num)
			if (!eng_grp->g->eng_ref_cnt[bit]) {
ret = __write_ucode_base(cptpf, bit, dma_addr,
						       blkaddr);
				if (ret)
					return ret;
}
	}
	return 0;
}
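/*
 * Note: on parts with a second CPT block (cptpf->has_cpt1), the AF registers
 * are programmed for BLKADDR_CPT1 first and then for BLKADDR_CPT0, as below.
 */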
if (cptpf->has_cpt1) {
		ret = cptx_set_ucode_base(eng_grp, cptpf, BLKADDR_CPT1);
		if (ret)
			return ret;
	}
	return cptx_set_ucode_base(eng_grp, cptpf, BLKADDR_CPT0);
}
static int cptx_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
					 struct otx2_cptpf_dev *cptpf,
					 struct otx2_cpt_bitmap bmap,
					 int blkaddr)
{
	int i, timeout = 10;
	int busy, ret;
u64 reg = 0;
/* Detach the cores from group */
for_each_set_bit(i, bmap.bits, bmap.size) {
ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
					  CPT_AF_EXEX_CTL2(i), &reg, blkaddr);
		if (ret)
			return ret;

		if (reg & (1ull << eng_grp->idx)) {
			eng_grp->g->eng_ref_cnt[i]--;
			reg &= ~(1ull << eng_grp->idx);

			ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,
						    cptpf->pdev,
						    CPT_AF_EXEX_CTL2(i), reg,
						    blkaddr);
			if (ret)
				return ret;
}
}
	/* Wait for cores to become idle */
	do {
busy = 0;
		usleep_range(10000, 20000);
		if (timeout-- < 0)
			return -EBUSY;

for_each_set_bit(i, bmap.bits, bmap.size) {
ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox,
cptpf->pdev,
						   CPT_AF_EXEX_STS(i), &reg,
						   blkaddr);
			if (ret)
				return ret;

if (reg & 0x1) {
				busy = 1;
				break;
}
}
} while (busy);
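	/*
	 * Timing note: with timeout = 10 and usleep_range(10000, 20000), the
	 * idle poll above allows roughly a dozen 10-20ms sleep intervals (on
	 * the order of 100-250ms) before giving up with -EBUSY.
	 */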
/* Disable the cores only if they are not used anymore */
	for_each_set_bit(i, bmap.bits, bmap.size) {
		if (!eng_grp->g->eng_ref_cnt[i]) {
ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,
cptpf->pdev,
CPT_AF_EXEX_CTL(i), 0x0,
						    blkaddr);
			if (ret)
				return ret;
}
}
	return 0;
}

	bmap = get_cores_bmap(&cptpf->pdev->dev, eng_grp);
	if (!bmap.size)
		return -EINVAL;

if (cptpf->has_cpt1) {
ret = cptx_detach_and_disable_cores(eng_grp, cptpf, bmap,
						    BLKADDR_CPT1);
		if (ret)
			return ret;
	}
	return cptx_detach_and_disable_cores(eng_grp, cptpf, bmap,
BLKADDR_CPT0);
}
static int cptx_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
					struct otx2_cptpf_dev *cptpf,
					struct otx2_cpt_bitmap bmap,
					int blkaddr)
{
	u64 reg = 0;
	int i, ret;
/* Attach the cores to the group */
for_each_set_bit(i, bmap.bits, bmap.size) {
ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
					  CPT_AF_EXEX_CTL2(i), &reg, blkaddr);
		if (ret)
			return ret;
static int cpt_ucode_load_fw(struct pci_dev *pdev, struct fw_info_t *fw_info,
			     u16 rid)
{
	char filename[OTX2_CPT_NAME_LENGTH];
	char eng_type[8] = {0};
	int ret, e, i;
INIT_LIST_HEAD(&fw_info->ucodes);
for (e = 1; e < OTX2_CPT_MAX_ENG_TYPES; e++) {
		strcpy(eng_type, get_eng_type_str(e));
		for (i = 0; i < strlen(eng_type); i++)
eng_type[i] = tolower(eng_type[i]);
snprintf(filename, sizeof(filename), "mrvl/cpt%02d/%s.out",
			 rid, eng_type);

		/* Request firmware for each engine type */
		ret = load_fw(&pdev->dev, fw_info, filename, rid);
		if (ret)
			goto release_fw;
}
	print_uc_info(fw_info);

	return 0;

release_fw:
	cpt_ucode_release_fw(fw_info);
	return ret;
}
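/*
 * Example of a constructed firmware path (assuming rid 1 and the lowercased
 * engine type string "se"): "mrvl/cpt01/se.out".
 */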
	switch (req_eng->type) {
	case OTX2_CPT_SE_TYPES:
		avail_cnt = grp->g->avail.se_cnt;
		break;
case OTX2_CPT_IE_TYPES:
		avail_cnt = grp->g->avail.ie_cnt;
		break;
case OTX2_CPT_AE_TYPES:
		avail_cnt = grp->g->avail.ae_cnt;
		break;
default:
dev_err(dev, "Invalid engine type %d\n", req_eng->type); return -EINVAL;
}
	if (avail_cnt < req_eng->count) {
		dev_err(dev, "Error: available %s engines %d < requested %d\n",
			get_eng_type_str(req_eng->type),
			avail_cnt, req_eng->count);
		return -EBUSY;
	}
	return 0;
}
static int reserve_engines(struct device *dev,
			   struct otx2_cpt_eng_grp_info *grp,
			   struct otx2_cpt_engines *req_engs, int ucodes_cnt)
{
	int i, ret = 0;

	/* Validate that the requested number of engines is available */
	for (i = 0; i < ucodes_cnt; i++) {
		ret = check_engines_availability(dev, grp, &req_engs[i]);
		if (ret)
			return ret;
}
	/* Reserve requested engines for this engine group */
	for (i = 0; i < ucodes_cnt; i++) {
		ret = do_reserve_engines(dev, grp, &req_engs[i]);
		if (ret)
			return ret;
	}
	return 0;
}
static void update_requested_engs(struct otx2_cpt_eng_grp_info *mirror_eng_grp,
				  struct otx2_cpt_engines *engs, int engs_cnt)
{
	struct otx2_cpt_engs_rsvd *mirrored_engs;
	int i;
for (i = 0; i < engs_cnt; i++) {
mirrored_engs = find_engines_by_type(mirror_eng_grp,
						     engs[i].type);
		if (!mirrored_engs)
			continue;

		/*
		 * If the mirrored group has this type of engines attached,
		 * there are 3 possible scenarios:
		 * 1) mirrored_engs.count == engs[i].count: all engines from
		 *    the mirrored engine group will be shared with this
		 *    engine group.
		 * 2) mirrored_engs.count > engs[i].count: only a subset of
		 *    the engines from the mirrored engine group will be
		 *    shared with this engine group.
		 * 3) mirrored_engs.count < engs[i].count: all engines from
		 *    the mirrored engine group will be shared with this
		 *    group, and additional engines will be reserved for
		 *    exclusive use by this engine group.
		 */
engs[i].count -= mirrored_engs->count;
}
}
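/*
 * Worked example for scenario 3 (numbers are illustrative only): if the
 * mirrored group holds 10 SE engines and engs[i].count is 12, the
 * subtraction above leaves engs[i].count == 2, i.e. only 2 additional SE
 * engines need to be reserved exclusively for the new group.
 */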
	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
		if (!eng_grps->grp[i].is_enabled)
			continue;
		if (eng_grps->grp[i].ucode[0].type &&
		    eng_grps->grp[i].ucode[1].type)
			continue;
		if (grp->idx == i)
			continue;
		if (!strncasecmp(eng_grps->grp[i].ucode[0].ver_str,
				 grp->ucode[0].ver_str,
				 OTX2_CPT_UCODE_VER_STR_SZ))
			return &eng_grps->grp[i];
	}
}
return NULL;
}
static struct otx2_cpt_eng_grp_info *find_unused_eng_grp(
					struct otx2_cpt_eng_grps *eng_grps)
{
	int i;

	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
		if (!eng_grps->grp[i].is_enabled)
			return &eng_grps->grp[i];
	}
	return NULL;
}
static int eng_grp_update_masks(struct device *dev,
				struct otx2_cpt_eng_grp_info *eng_grp)
{
	struct otx2_cpt_engs_rsvd *engs, *mirrored_engs;
	struct otx2_cpt_bitmap tmp_bmap = { {0} };
	int i, j, cnt, max_cnt;
	int bit;
for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
		engs = &eng_grp->engs[i];
		if (!engs->type)
			continue;
		if (engs->count <= 0)
			continue;

		switch (engs->type) {
		case OTX2_CPT_SE_TYPES:
			max_cnt = eng_grp->g->avail.max_se_cnt;
			break;
case OTX2_CPT_IE_TYPES:
			max_cnt = eng_grp->g->avail.max_ie_cnt;
			break;
case OTX2_CPT_AE_TYPES:
			max_cnt = eng_grp->g->avail.max_ae_cnt;
			break;
default:
dev_err(dev, "Invalid engine type %d\n", engs->type); return -EINVAL;
}
	if (eng_grp->engs[1].type) {
		if (is_2nd_ucode_used(eng_grp))
			eng_grp->engs[1].ucode = &eng_grp->ucode[1];
		else
			eng_grp->engs[1].ucode = ucode;
}
}
static int create_engine_group(struct device *dev,
			       struct otx2_cpt_eng_grps *eng_grps,
			       struct otx2_cpt_engines *engs, int ucodes_cnt,
			       void *ucode_data[], int is_print)
{
	struct otx2_cpt_eng_grp_info *mirrored_eng_grp;
	struct otx2_cpt_eng_grp_info *eng_grp;
	struct otx2_cpt_uc_info_t *uc_info;
	int i, ret = 0;
/* Find engine group which is not used */
	eng_grp = find_unused_eng_grp(eng_grps);
	if (!eng_grp) {
		dev_err(dev, "Error all engine groups are being used\n");
		return -ENOSPC;
	}

	/* Load ucode */
	for (i = 0; i < ucodes_cnt; i++) {
uc_info = (struct otx2_cpt_uc_info_t *) ucode_data[i];
eng_grp->ucode[i] = uc_info->ucode;
ret = copy_ucode_to_dma_mem(dev, &eng_grp->ucode[i],
					     uc_info->fw->data);
		if (ret)
			goto unload_ucode;
}
/* Check if this group mirrors another existing engine group */
	mirrored_eng_grp = find_mirrored_eng_grp(eng_grp);
	if (mirrored_eng_grp) {
		/* Setup mirroring */
setup_eng_grp_mirroring(eng_grp, mirrored_eng_grp);
		/*
		 * Update the count of requested engines because some of them
		 * might be shared with the mirrored group.
		 */
update_requested_engs(mirrored_eng_grp, engs, ucodes_cnt);
}
	ret = reserve_engines(dev, eng_grp, engs, ucodes_cnt);
	if (ret)
		goto unload_ucode;
/* Update ucode pointers used by engines */
update_ucode_ptrs(eng_grp);
/* Update engine masks used by this group */
	ret = eng_grp_update_masks(dev, eng_grp);
	if (ret)
		goto release_engs;
/* Enable engine group */
	ret = enable_eng_grp(eng_grp, eng_grps->obj);
	if (ret)
		goto release_engs;

	/*
	 * If this engine group mirrors another engine group,
	 * then we need to unload the ucode as we will use the
	 * ucode from the mirrored engine group.
	 */
	if (eng_grp->mirror.is_ena)
ucode_unload(dev, &eng_grp->ucode[0]);
eng_grp->is_enabled = true;
	if (!is_print)
		return 0;

if (mirrored_eng_grp)
dev_info(dev, "Engine_group%d: reuse microcode %s from group %d\n",
eng_grp->idx, mirrored_eng_grp->ucode[0].ver_str,
			 mirrored_eng_grp->idx);
	else
dev_info(dev, "Engine_group%d: microcode loaded %s\n",
			 eng_grp->idx, eng_grp->ucode[0].ver_str);
	if (is_2nd_ucode_used(eng_grp))
dev_info(dev, "Engine_group%d: microcode loaded %s\n",
			 eng_grp->idx, eng_grp->ucode[1].ver_str);

	return 0;

release_engs:
	release_engines(dev, eng_grp);
unload_ucode:
	ucode_unload(dev, &eng_grp->ucode[0]);
	ucode_unload(dev, &eng_grp->ucode[1]);
	return ret;
}
static void delete_engine_grps(struct pci_dev *pdev,
			       struct otx2_cpt_eng_grps *eng_grps)
{
	int i;
	/* First delete all mirroring engine groups */
	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++)
		if (eng_grps->grp[i].mirror.is_ena)
delete_engine_group(&pdev->dev, &eng_grps->grp[i]);
	/* Delete remaining engine groups */
	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++)
delete_engine_group(&pdev->dev, &eng_grps->grp[i]);
}
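/*
 * Ordering note: mirroring groups borrow engines and microcode from the
 * groups they mirror, so they are deleted first, before the groups they
 * depend on are torn down.
 */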
	mutex_lock(&eng_grps->lock);

	/*
	 * We don't create engine groups if they were already
	 * created (when the user enabled VFs for the first time).
	 */
	if (eng_grps->is_grps_created)
		goto unlock;

	ret = cpt_ucode_load_fw(pdev, &fw_info, eng_grps->rid);
	if (ret)
		goto unlock;
	/*
	 * Create engine group with SE engines for kernel
	 * crypto functionality (symmetric crypto).
	 */
	uc_info[0] = get_ucode(&fw_info, OTX2_CPT_SE_TYPES);
	if (uc_info[0] == NULL) {
dev_err(&pdev->dev, "Unable to find firmware for SE\n");
		ret = -EINVAL;
		goto release_fw;
}
engs[0].type = OTX2_CPT_SE_TYPES;
engs[0].count = eng_grps->avail.max_se_cnt;
ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
				  (void **) uc_info, 1);
	if (ret)
		goto release_fw;

	/*
	 * Create engine group with SE+IE engines for IPSec.
	 * All SE engines will be shared with engine group 0.
*/
uc_info[0] = get_ucode(&fw_info, OTX2_CPT_SE_TYPES);
uc_info[1] = get_ucode(&fw_info, OTX2_CPT_IE_TYPES);
if (uc_info[1] == NULL) {
dev_err(&pdev->dev, "Unable to find firmware for IE");
ret = -EINVAL; goto delete_eng_grp;
}
engs[0].type = OTX2_CPT_SE_TYPES;
engs[0].count = eng_grps->avail.max_se_cnt;
engs[1].type = OTX2_CPT_IE_TYPES;
engs[1].count = eng_grps->avail.max_ie_cnt;
ret = create_engine_group(&pdev->dev, eng_grps, engs, 2,
				  (void **) uc_info, 1);
	if (ret)
		goto delete_eng_grp;

	/*
	 * Create engine group with AE engines for asymmetric
	 * crypto functionality.
*/
	uc_info[0] = get_ucode(&fw_info, OTX2_CPT_AE_TYPES);
	if (uc_info[0] == NULL) {
dev_err(&pdev->dev, "Unable to find firmware for AE");
ret = -EINVAL; goto delete_eng_grp;
}
engs[0].type = OTX2_CPT_AE_TYPES;
engs[0].count = eng_grps->avail.max_ae_cnt;
ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
				  (void **) uc_info, 1);
	if (ret)
		goto delete_eng_grp;

eng_grps->is_grps_created = true;
cpt_ucode_release_fw(&fw_info);
	if (is_dev_otx2(pdev))
		goto unlock;

	/*
	 * Ensure RNM_ENTROPY_STATUS[NORMAL_CNT] = 0x40 before writing
	 * CPT_AF_CTL[RNM_REQ_EN] = 1 as a workaround for HW errata.
*/
rnm_to_cpt_errata_fixup(&pdev->dev);
	otx2_cpt_read_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTL, &reg_val,
			     BLKADDR_CPT0);

	/*
	 * Configure the engine group mask to allow context prefetching
	 * for the groups, and enable the random number request so that
	 * CPT can request random numbers from RNM.
*/
reg_val |= OTX2_CPT_ALL_ENG_GRPS_MASK << 3 | BIT_ULL(16);
	otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTL,
			      reg_val, BLKADDR_CPT0);

	/*
	 * Set the interval to periodically flush dirty data for the next
	 * CTX cache entry. Set the interval count to the maximum supported
	 * value.
*/
otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTX_FLUSH_TIMER,
CTX_FLUSH_TIMER_CNT, BLKADDR_CPT0);
	/*
	 * Set CPT_AF_DIAG[FLT_DIS] as a workaround for HW errata: when
	 * CPT_AF_DIAG[FLT_DIS] = 0 and a CPT engine access to LLC/DRAM
	 * encounters a fault/poison, a rare case may result in
	 * unpredictable data being delivered to a CPT engine.
	 */
	if (cpt_is_errata_38550_exists(pdev)) {
otx2_cpt_read_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_DIAG,
				     &reg_val, BLKADDR_CPT0);
otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_DIAG,
reg_val | BIT_ULL(24), BLKADDR_CPT0);
}
static int cptx_disable_all_cores(struct otx2_cptpf_dev *cptpf,
				  int total_cores, int blkaddr)
{
	int timeout = 10, ret;
	int i, busy;
u64 reg;
	/* Disengage the cores from groups */
	for (i = 0; i < total_cores; i++) {
ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
CPT_AF_EXEX_CTL2(i), 0x0,
						blkaddr);
		if (ret)
			return ret;

cptpf->eng_grps.eng_ref_cnt[i] = 0;
}
	ret = otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);
	if (ret)
		return ret;

	/* Wait for cores to become idle */
	do {
busy = 0;
		usleep_range(10000, 20000);
		if (timeout-- < 0)
			return -EBUSY;

for (i = 0; i < total_cores; i++) {
ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox,
cptpf->pdev,
						  CPT_AF_EXEX_STS(i), &reg,
						  blkaddr);
			if (ret)
				return ret;

if (reg & 0x1) {
				busy = 1;
				break;
}
}
} while (busy);
	/* Disable the cores */
	for (i = 0; i < total_cores; i++) {
ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
CPT_AF_EXEX_CTL(i), 0x0,
						blkaddr);
		if (ret)
			return ret;
	}
	return otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);
}
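/*
 * Note: otx2_cpt_add_write_af_reg() only queues a register write in the
 * AF/PF mailbox; the queued writes are committed in one batch by
 * otx2_cpt_send_af_reg_requests(), which is why each loop above is followed
 * by a single send call.
 */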
int otx2_cpt_disable_all_cores(struct otx2_cptpf_dev *cptpf)
{
	int total_cores, ret;
/*
 * Get CPT HW capabilities using LOAD_FVC operation.
 */
int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf)
{
	struct otx2_cptlfs_info *lfs = &cptpf->lfs;
	struct otx2_cpt_iq_command iq_cmd;
	union otx2_cpt_opcode opcode;
	union otx2_cpt_res_s *result;
	union otx2_cpt_inst_s inst;
dma_addr_t result_baddr;
	dma_addr_t rptr_baddr;
	struct pci_dev *pdev;
	int timeout = 10000;
	void *base, *rptr;
	int ret, etype;
u32 len;
	/*
	 * We don't get the capabilities again if they were already
	 * discovered (when the user enabled VFs for the first time).
	 */
	if (cptpf->is_eng_caps_discovered)
		return 0;

	pdev = cptpf->pdev;

	/*
	 * Create engine groups for each type to submit LOAD_FVC op and
	 * get the engines' capabilities.
*/
	ret = create_eng_caps_discovery_grps(pdev, &cptpf->eng_grps);
	if (ret)
		goto delete_grps;

ret = otx2_cptlf_init(lfs, OTX2_CPT_ALL_ENG_GRPS_MASK,
			       OTX2_CPT_QUEUE_HI_PRIO, 1);
	if (ret)
		goto delete_grps;

/* Allocate extra memory for "rptr" and "result" pointer alignment */
	len = LOADFVC_RLEN + ARCH_DMA_MINALIGN +
	      sizeof(union otx2_cpt_res_s) + OTX2_CPT_RES_ADDR_ALIGN;
	base = kzalloc(len, GFP_KERNEL);
	if (!base) {
		ret = -ENOMEM;
		goto lf_cleanup;
}
if (grp_idx > OTX2_CPT_MAX_ETYPES_PER_GRP) {
err_msg = "Error max 2 engine types can be attached"; goto err_print;
}
	if (grp_idx > 1) {
		if ((engs[0].type + engs[1].type) !=
(OTX2_CPT_SE_TYPES + OTX2_CPT_IE_TYPES)) {
err_msg = "Only combination of SE+IE engines is allowed"; goto err_print;
} /* Keep SE engines at zero index */ if (engs[1].type == OTX2_CPT_SE_TYPES)
swap(engs[0], engs[1]);
}
mutex_lock(&eng_grps->lock);
if (cptpf->enabled_vfs) {
dev_err(dev, "Disable VFs before modifying engine groups\n");
		ret = -EACCES;
		goto err_unlock;
}
INIT_LIST_HEAD(&fw_info.ucodes);
	ret = load_fw(dev, &fw_info, ucode_filename[0], eng_grps->rid);
	if (ret) {
		dev_err(dev, "Unable to load firmware %s\n", ucode_filename[0]);
		goto err_unlock;
	}
	if (ucode_idx > 1) {
		ret = load_fw(dev, &fw_info, ucode_filename[1], eng_grps->rid);
		if (ret) {
dev_err(dev, "Unable to load firmware %s\n",
				ucode_filename[1]);
			goto release_fw;
}
}
	uc_info[0] = get_ucode(&fw_info, engs[0].type);
	if (uc_info[0] == NULL) {
dev_err(dev, "Unable to find firmware for %s\n",
get_eng_type_str(engs[0].type));
		ret = -EINVAL;
		goto release_fw;
	}
	if (ucode_idx > 1) {
		uc_info[1] = get_ucode(&fw_info, engs[1].type);
		if (uc_info[1] == NULL) {
dev_err(dev, "Unable to find firmware for %s\n",
get_eng_type_str(engs[1].type));
			ret = -EINVAL;
			goto release_fw;
}
}
ret = create_engine_group(dev, eng_grps, engs, grp_idx,
(void **)uc_info, 1);