/*
 * NOTE(review): this chunk begins mid-function -- the enclosing function's
 * signature is not visible here, and the line breaks appear mangled
 * (several statements share one physical line).  Comments below describe
 * only what the visible code does; verify against the original file.
 */
/*
 * Map the configured RX buffer size onto the RB-size control flags that
 * are passed to the device via the prph scratch area.  IWL_AMSDU_DEF is
 * rejected outright; 2K needs no flag; 8K/12K set the 4K flag plus an
 * "extended" flag for firmware that understands the larger sizes.
 */
switch (trans->conf.rx_buf_size) { case IWL_AMSDU_DEF: return -EINVAL; case IWL_AMSDU_2K: break; case IWL_AMSDU_4K:
control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_4K; break; case IWL_AMSDU_8K:
control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_4K; /* if firmware supports the ext size, tell it */
control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_EXT_8K; break; case IWL_AMSDU_12K:
control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_4K; /* if firmware supports the ext size, tell it */
control_flags |= IWL_PRPH_SCRATCH_RB_SIZE_EXT_16K; break;
}
/* Extended control flags from the transport configuration. */
if (trans->conf.dsbr_urm_fw_dependent)
control_flags_ext |= IWL_PRPH_SCRATCH_EXT_URM_FW;
if (trans->conf.dsbr_urm_permanent)
control_flags_ext |= IWL_PRPH_SCRATCH_EXT_URM_PERM;
if (trans->conf.ext_32khz_clock_valid)
control_flags_ext |= IWL_PRPH_SCRATCH_EXT_32KHZ_CLK_VALID;
/* initialize the Step equalizer data */
prph_sc_ctrl->step_cfg.mbx_addr_0 =
cpu_to_le32(trans->conf.mbx_addr_0_step);
prph_sc_ctrl->step_cfg.mbx_addr_1 =
cpu_to_le32(trans->conf.mbx_addr_1_step);
/* allocate ucode sections in dram and set addresses */
ret = iwl_pcie_init_fw_sec(trans, img, &prph_scratch->dram.common); if (ret) goto err_free_prph_scratch;
/* Allocate prph information * currently we don't assign to the prph info anything, but it would get * assigned later * * We also use the second half of this page to give the device some * dummy TR/CR tail pointers - which shouldn't be necessary as we don't * use this, but the hardware still reads/writes there and we can't let * it go do that with a NULL pointer.
*/
BUILD_BUG_ON(sizeof(*prph_info) > PAGE_SIZE / 2);
prph_info = dma_alloc_coherent(trans->dev, PAGE_SIZE,
&trans_pcie->prph_info_dma_addr,
GFP_KERNEL); if (!prph_info) {
ret = -ENOMEM; goto err_free_prph_scratch;
}
/* Allocate context info */
ctxt_info_v2 = dma_alloc_coherent(trans->dev, sizeof(*ctxt_info_v2),
&trans_pcie->ctxt_info_dma_addr,
GFP_KERNEL); if (!ctxt_info_v2) {
ret = -ENOMEM; goto err_free_prph_info;
}
/* * This code assumes the FSEQ is last and we can make that * optional; old devices _should_ be fine with a bigger size, * but in simulation we check the size more precisely.
*/
/*
 * Advertise the prph scratch size to the device: the full structure when
 * the extended-FSEQ flag is set, otherwise only up to the end of
 * dram.common (the FSEQ image area is trailing and optional).
 */
BUILD_BUG_ON(offsetofend(typeof(*prph_scratch), dram.common) + sizeof(prph_scratch->dram.fseq_img) != sizeof(*prph_scratch)); if (control_flags_ext & IWL_PRPH_SCRATCH_EXT_EXT_FSEQ)
ctxt_info_v2->prph_scratch_size =
cpu_to_le32(sizeof(*prph_scratch)); else
ctxt_info_v2->prph_scratch_size =
cpu_to_le32(offsetofend(typeof(*prph_scratch),
dram.common));
/*
 * NOTE(review): freeing prph_info here directly contradicts both the
 * "needed for the entire lifetime" comment and the allocation a few lines
 * above.  This looks like code from the teardown/free path spliced into
 * this chunk by whatever mangled the file -- verify against the original
 * source before trusting this sequence.
 */
/* this is needed for the entire lifetime */
dma_free_coherent(trans->dev, PAGE_SIZE, trans_pcie->prph_info,
trans_pcie->prph_info_dma_addr);
trans_pcie->prph_info_dma_addr = 0;
trans_pcie->prph_info = NULL;
}
/*
 * NOTE(review): this chunk also begins mid-function (no signature visible);
 * presumably the tail of a segmented-payload loader.  The first loop over
 * n_chunks (below) is never closed before a second loop reusing the same
 * index `i` begins -- the per-chunk DRAM allocation body appears to have
 * been truncated by extraction.  Verify against the original file.
 */
/* allocate and init DRAM descriptors array */
len = sizeof(struct iwl_prph_scratch_mem_desc_addr_array);
desc_dram->block = iwl_pcie_ctxt_info_dma_alloc_coherent
(trans,
len,
&desc_dram->physical); if (!desc_dram->block) {
IWL_DEBUG_FW(trans, "Failed to allocate PNVM DMA.\n"); return -ENOMEM;
}
desc_dram->size = len;
memset(desc_dram->block, 0, len);
/* allocate DRAM region for each payload */
dram_regions->n_regions = 0; for (i = 0; i < pnvm_data->n_chunks; i++) {
len = pnvm_data->chunks[i].len;
data = pnvm_data->chunks[i].data;
/* fill desc with the DRAM payloads addresses */
addresses = desc_dram->block; for (i = 0; i < pnvm_data->n_chunks; i++) {
addresses->mem_descs[i] =
cpu_to_le64(dram_regions->drams[i].physical);
}
return 0;
}
int iwl_trans_pcie_ctx_info_v2_load_pnvm(struct iwl_trans *trans, conststruct iwl_pnvm_image *pnvm_payloads, conststruct iwl_ucode_capabilities *capa)
{ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
&trans_pcie->prph_scratch->ctrl_cfg; struct iwl_dram_regions *dram_regions = &trans_pcie->pnvm_data; int ret = 0;
/* only allocate the DRAM if not allocated yet */ if (trans->pnvm_loaded) return 0;
if (WARN_ON(prph_sc_ctrl->pnvm_cfg.pnvm_size)) return -EBUSY;
if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210) return 0;
if (!pnvm_payloads->n_chunks) {
IWL_DEBUG_FW(trans, "no payloads\n"); return -EINVAL;
}
/* save payloads in several DRAM sections */ if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG)) {
ret = iwl_pcie_load_payloads_segments(trans,
dram_regions,
pnvm_payloads); if (!ret)
trans->pnvm_loaded = true;
} else { /* save only in one DRAM section */
ret = iwl_pcie_load_payloads_contig(trans, pnvm_payloads,
&dram_regions->drams[0]); if (!ret) {
dram_regions->n_regions = 1;
trans->pnvm_loaded = true;
}
}
if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG))
iwl_pcie_set_pnvm_segments(trans); else
iwl_pcie_set_contig_pnvm(trans);
}
int iwl_trans_pcie_ctx_info_v2_load_reduce_power(struct iwl_trans *trans, conststruct iwl_pnvm_image *payloads, conststruct iwl_ucode_capabilities *capa)
{ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
&trans_pcie->prph_scratch->ctrl_cfg; struct iwl_dram_regions *dram_regions = &trans_pcie->reduced_tables_data; int ret = 0;
/* only allocate the DRAM if not allocated yet */ if (trans->reduce_power_loaded) return 0;
if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210) return 0;
if (WARN_ON(prph_sc_ctrl->reduce_power_cfg.size)) return -EBUSY;
if (!payloads->n_chunks) {
IWL_DEBUG_FW(trans, "no payloads\n"); return -EINVAL;
}
/* save payloads in several DRAM sections */ if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG)) {
ret = iwl_pcie_load_payloads_segments(trans,
dram_regions,
payloads); if (!ret)
trans->reduce_power_loaded = true;
} else { /* save only in one DRAM section */
ret = iwl_pcie_load_payloads_contig(trans, payloads,
&dram_regions->drams[0]); if (!ret) {
dram_regions->n_regions = 1;
trans->reduce_power_loaded = true;
}
}
/*
 * NOTE(review): the following text is not C code -- it is a German web-page
 * disclaimer that was evidently pasted into the file by the extraction tool
 * and should be removed.  English translation, preserved for reference:
 * "The information on this website was carefully compiled to the best of
 * our knowledge. However, neither completeness, nor correctness, nor
 * quality of the provided information is guaranteed. Remark: the colored
 * syntax highlighting and the measurement are still experimental."
 */