/* NOTE(review): fragment - the function signature is above this chunk.
 * Presumably the tail of the TSO/workaround page allocator; confirm
 * against the full source.
 */
ret = alloc_page(GFP_ATOMIC); if (!ret) return NULL;
/* the page-info header lives inside the page itself */
info = IWL_TSO_PAGE_INFO(page_address(ret));
/* Create a DMA mapping for the page */
phys = dma_map_page_attrs(trans->dev, ret, 0, PAGE_SIZE,
DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); if (unlikely(dma_mapping_error(trans->dev, phys))) {
/* mapping failed: release the freshly allocated page */
__free_page(ret); return NULL;
}
/* Store physical address and set use count */
info->dma_addr = phys;
refcount_set(&info->use_count, 1);
/* set the chaining pointer to the previous page if there */
info->next = *page_ptr;
*page_ptr = ret;
return ret;
}
/* * Add a TB and if needed apply the FH HW bug workaround; * meta != NULL indicates that it's a page mapping and we * need to dma_unmap_page() and set the meta->tbs bit in * this case.
*/ staticint iwl_txq_gen2_set_tb_with_wa(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_tfh_tfd *tfd,
dma_addr_t phys, void *virt,
u16 len, struct iwl_cmd_meta *meta, bool unmap)
{
/* NOTE(review): fragment - the 'trace:' and 'unmap:' labels this
 * function jumps to are not visible in this chunk; the tail of the
 * function appears to be cut off.
 */
dma_addr_t oldphys = phys; struct page *page; int ret;
if (unlikely(dma_mapping_error(trans->dev, phys))) return -ENOMEM;
/* common case: the TB does not hit the HW bug, add it directly */
if (likely(!iwl_txq_crosses_4g_boundary(phys, len))) {
ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len);
if (ret < 0) goto unmap;
if (meta)
meta->tbs |= BIT(ret);
ret = 0; goto trace;
}
/* * Work around a hardware bug. If (as expressed in the * condition above) the TB ends on a 32-bit boundary, * then the next TB may be accessed with the wrong * address. * To work around it, copy the data elsewhere and make * a new mapping for it so the device will not fail.
*/
if (WARN_ON(len > IWL_TSO_PAGE_DATA_SIZE)) {
ret = -ENOBUFS; goto unmap;
}
page = get_workaround_page(trans, skb); if (!page) {
ret = -ENOMEM; goto unmap;
}
/* copy the affected data into the workaround page */
memcpy(page_address(page), virt, len);
/* * This is a bit odd, but performance does not matter here, what * matters are the expectations of the calling code and TB cleanup * function. * * As such, if unmap is set, then create another mapping for the TB * entry as it will be unmapped later. On the other hand, if it is not * set, then the TB entry will not be unmapped and instead we simply * reference and sync the mapping that get_workaround_page() created.
*/ if (unmap) {
phys = dma_map_single(trans->dev, page_address(page), len,
DMA_TO_DEVICE); if (unlikely(dma_mapping_error(trans->dev, phys))) return -ENOMEM;
} else {
phys = iwl_pcie_get_tso_page_phys(page_address(page));
dma_sync_single_for_device(trans->dev, phys, len,
DMA_TO_DEVICE);
}
ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len); if (ret < 0) { /* unmap the new allocation as single */
oldphys = phys;
meta = NULL; goto unmap;
}
/* NOTE(review): '(unsignedlonglong)' below looks like an extraction
 * artifact of '(unsigned long long)' - confirm against the original.
 */
IWL_DEBUG_TX(trans, "TB bug workaround: copied %d bytes from 0x%llx to 0x%llx\n",
len, (unsignedlonglong)oldphys,
(unsignedlonglong)phys);
/* NOTE(review): fragment - the head of the A-MSDU build function (signature,
 * variable declarations) and parts of the per-subframe header construction
 * are missing from this chunk; a stray '#endif' at the bottom suggests the
 * surrounding '#ifdef' is also outside the view. Confirm against the full
 * source before changing anything here.
 */
/* total amount of header we may need for this A-MSDU */
hdr_room = DIV_ROUND_UP(total_len, mss) *
(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr));
/* Our device supports 9 segments at most, it will fit in 1 page */
sgt = iwl_pcie_prep_tso(trans, skb, out_meta, &start_hdr, hdr_room,
snap_ip_tcp_hdrlen + hdr_len); if (!sgt) return -ENOMEM;
/* * Pull the ieee80211 header to be able to use TSO core, * we will restore it for the tx_status flow.
*/
skb_pull(skb, hdr_len);
/* * Remove the length of all the headers that we don't actually * have in the MPDU by themselves, but that we duplicate into * all the different MSDUs inside the A-MSDU.
*/
le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);
tso_start(skb, &tso);
while (total_len) { /* this is the data left for this subframe */ unsignedint data_left = min_t(unsignedint, mss, total_len); unsignedint tb_len;
dma_addr_t tb_phys;
u8 *pos_hdr = start_hdr;
/* * No need for _with_wa, this is from the TSO page and * we leave some space at the end of it so can't hit * the buggy scenario.
*/
iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb_len);
trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
tb_phys, tb_len); /* add this subframe's headers' length to the tx_cmd */
le16_add_cpu(&tx_cmd->len, tb_len);
/* prepare the start_hdr for the next subframe */
start_hdr = pos_hdr;
/* put the payload */ while (data_left) { int ret;
tb_len = min_t(unsignedint, tso.size, data_left);
tb_phys = iwl_pcie_get_sgt_tb_phys(sgt, data_offset,
tb_len); /* Not a real mapping error, use direct comparison */ if (unlikely(tb_phys == DMA_MAPPING_ERROR)) goto out_err;
ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd,
tb_phys, tso.data,
tb_len, NULL, false); if (ret) goto out_err;
/* re -add the WiFi header */
skb_push(skb, hdr_len);
return 0;
out_err: #endif return -EINVAL;
}
/* NOTE(review): fragment - this function jumps to 'out_err' but that label
 * and the closing brace are not visible in this chunk; the tail appears cut.
 * 'staticstruct' is presumably 'static struct' - extraction artifact.
 */
staticstruct
iwl_tfh_tfd *iwl_txq_gen2_build_tx_amsdu(struct iwl_trans *trans, struct iwl_txq *txq, struct iwl_device_tx_cmd *dev_cmd, struct sk_buff *skb, struct iwl_cmd_meta *out_meta, int hdr_len, int tx_cmd_len)
{ int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
dma_addr_t tb_phys; int len; void *tb1_addr;
/* TB0 always points at the per-slot first-TB buffer */
tb_phys = iwl_txq_get_first_tb_dma(txq, idx);
/* * No need for _with_wa, the first TB allocation is aligned up * to a 64-byte boundary and thus can't be at the end or cross * a page boundary (much less a 2^32 boundary).
*/
iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
/* * The second TB (tb1) points to the remainder of the TX command * and the 802.11 header - dword aligned size * (This calculation modifies the TX command, so do it before the * setup of the first TB)
*/
len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
IWL_FIRST_TB_SIZE;
/* do not align A-MSDU to dword as the subframe header aligns it */
/* map the data for TB1 */
tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(trans->dev, tb_phys))) goto out_err; /* * No need for _with_wa(), we ensure (via alignment) that the data * here can never cross or end at a page boundary.
*/
iwl_txq_gen2_set_tb(trans, tfd, tb_phys, len);
/* hand off to the A-MSDU builder for the remaining TBs */
if (iwl_txq_gen2_build_amsdu(trans, skb, tfd, out_meta,
len + IWL_FIRST_TB_SIZE, hdr_len, dev_cmd)) goto out_err;
/* building the A-MSDU might have changed this data, memcpy it now */
memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE); return tfd;
/* NOTE(review): fragment - interior of the non-A-MSDU TX build path; the
 * signature, local declarations ('pad', 'tb1_len', 'tb2_len', 'out_err')
 * and the tail are outside this chunk.
 */
/* The first TB points to bi-directional DMA data */
memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
/* * No need for _with_wa, the first TB allocation is aligned up * to a 64-byte boundary and thus can't be at the end or cross * a page boundary (much less a 2^32 boundary).
*/
iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
/* * The second TB (tb1) points to the remainder of the TX command * and the 802.11 header - dword aligned size * (This calculation modifies the TX command, so do it before the * setup of the first TB)
*/
len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
IWL_FIRST_TB_SIZE;
/* dword-align TB1 only when the caller requested padding */
if (pad)
tb1_len = ALIGN(len, 4); else
tb1_len = len;
/* map the data for TB1 */
tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(trans->dev, tb_phys))) goto out_err; /* * No need for _with_wa(), we ensure (via alignment) that the data * here can never cross or end at a page boundary.
*/
iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
IWL_FIRST_TB_SIZE + tb1_len, hdr_len);
/* set up TFD's third entry to point to remainder of skb's head */
tb2_len = skb_headlen(skb) - hdr_len;
/* NOTE(review): fragment - tail of the TFD-build dispatcher; the signature
 * and the declarations of 'tfd', 'len', 'amsdu', 'hdr_len' are above this
 * chunk.
 */
/* There must be data left over for TB1 or this code must be changed */
BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_v9) < IWL_FIRST_TB_SIZE);
BUILD_BUG_ON(sizeof(struct iwl_cmd_header) +
offsetofend(struct iwl_tx_cmd_v9, dram_info) >
IWL_FIRST_TB_SIZE);
BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_FIRST_TB_SIZE);
BUILD_BUG_ON(sizeof(struct iwl_cmd_header) +
offsetofend(struct iwl_tx_cmd, dram_info) >
IWL_FIRST_TB_SIZE);
/* start from a clean TFD */
memset(tfd, 0, sizeof(*tfd));
/* command size depends on the device family (v9 vs. current layout) */
if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
len = sizeof(struct iwl_tx_cmd_v9); else
len = sizeof(struct iwl_tx_cmd);
/* * Only build A-MSDUs here if doing so by GSO, otherwise it may be * an A-MSDU for other reasons, e.g. NAN or an A-MSDU having been * built in the higher layers already.
*/ if (amsdu && skb_shinfo(skb)->gso_size) return iwl_txq_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb,
out_meta, hdr_len, len); return iwl_txq_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta,
hdr_len, len, !amsdu);
}
/* NOTE(review): fragment - tail of a queue-space helper; the signature and
 * the declarations of 'q', 'max', 'used' are above this chunk.
 */
/* * To avoid ambiguity between empty and completely full queues, there * should always be less than max_tfd_queue_size elements in the queue. * If q->n_window is smaller than max_tfd_queue_size, there is no need * to reserve any queue entries for this purpose.
*/ if (q->n_window < trans->mac_cfg->base->max_tfd_queue_size)
max = q->n_window; else
max = trans->mac_cfg->base->max_tfd_queue_size - 1;
/* * max_tfd_queue_size is a power of 2, so the following is equivalent to * modulo by max_tfd_queue_size and is well defined.
*/
used = (q->write_ptr - q->read_ptr) &
(trans->mac_cfg->base->max_tfd_queue_size - 1);
/* more used than allowed would mean a bookkeeping bug */
if (WARN_ON(used > max)) return 0;
return max - used;
}
/* * iwl_pcie_gen2_update_byte_tbl - Set up entry in Tx byte-count array
*/ staticvoid iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans, struct iwl_txq *txq, u16 byte_cnt, int num_tbs)
{ int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); struct iwl_bc_tbl_entry *scd_bc_tbl = txq->bc_tbl.addr;
/* NOTE(review): fragment - the function is cut off after computing
 * num_fetch_chunks; the write of 'bc_ent' into 'scd_bc_tbl' is not
 * visible in this chunk. 'staticvoid' is presumably 'static void'.
 */
u8 filled_tfd_size, num_fetch_chunks;
u16 len = byte_cnt;
__le16 bc_ent;
if (WARN(idx >= txq->n_window, "%d >= %d\n", idx, txq->n_window)) return;
filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
num_tbs * sizeof(struct iwl_tfh_tb); /* * filled_tfd_size contains the number of filled bytes in the TFD. * Dividing it by 64 will give the number of chunks to fetch * to SRAM- 0 for one chunk, 1 for 2 and so on. * If, for example, TFD contains only 3 TBs then 32 bytes * of the TFD are used, and only one chunk of 64 bytes should * be fetched
*/
num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
/* NOTE(review): fragment - interior of the TB-append helper; the signature
 * and the declarations of 'addr', 'len', 'idx', 'tb', 'tfd' are outside
 * this chunk, as is the tail that fills in the TB.
 */
/* Only WARN here so we know about the issue, but we mess up our * unmap path because not every place currently checks for errors * returned from this function - it can only return an error if * there's no more space, and so when we know there is enough we * don't always check ...
*/
WARN(iwl_txq_crosses_4g_boundary(addr, len), "possible DMA problem with iova:0x%llx, len:%d\n",
(unsignedlonglong)addr, len);
if (WARN_ON(idx >= IWL_TFH_NUM_TBS)) return -EINVAL;
tb = &tfd->tbs[idx];
/* Each TFD can point to a maximum max_tbs Tx buffers */ if (le16_to_cpu(tfd->num_tbs) >= trans_pcie->txqs.tfd.max_tbs) {
IWL_ERR(trans, "Error can not send more than %d chunks\n",
trans_pcie->txqs.tfd.max_tbs); return -EINVAL;
}
void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans, struct iwl_cmd_meta *meta, struct iwl_tfh_tfd *tfd)
{ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int i, num_tbs;
/* Sanity check on number of chunks */
num_tbs = iwl_txq_gen2_get_num_tbs(tfd);
if (num_tbs > trans_pcie->txqs.tfd.max_tbs) {
IWL_ERR(trans, "Too many chunks: %i\n", num_tbs); return;
}
/* TB1 is mapped directly, the rest is the TSO page and SG list. */ if (meta->sg_offset)
num_tbs = 2;
/* first TB is never freed - it's the bidirectional DMA data */ for (i = 1; i < num_tbs; i++) { if (meta->tbs & BIT(i))
dma_unmap_page(trans->dev,
le64_to_cpu(tfd->tbs[i].addr),
le16_to_cpu(tfd->tbs[i].tb_len),
DMA_TO_DEVICE); else
dma_unmap_single(trans->dev,
le64_to_cpu(tfd->tbs[i].addr),
le16_to_cpu(tfd->tbs[i].tb_len),
DMA_TO_DEVICE);
}
iwl_txq_set_tfd_invalid_gen2(trans, tfd);
}
/* NOTE(review): 'skb' is read below without any visible assignment, and no
 * TFD unmap happens before the free - lines appear to be missing from this
 * chunk (presumably the assignment skb = txq->entries[idx].skb and the
 * tfd_unmap call). Confirm against the full source before editing.
 * 'staticvoid' is presumably 'static void' - extraction artifact.
 */
staticvoid iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
{ /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and * idx is bounded by n_window
*/ int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr); struct sk_buff *skb;
/* Can be called from irqs-disabled context * If skb is not NULL, it means that the whole queue is being * freed and that the queue is not empty - free the skb
*/ if (skb) {
iwl_op_mode_free_skb(trans->op_mode, skb);
txq->entries[idx].skb = NULL;
}
}
/* * iwl_txq_inc_wr_ptr - Send new write index to hardware
*/ staticvoid iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
{
lockdep_assert_held(&txq->lock);
/* * if not in power-save mode, uCode will never sleep when we're * trying to tx (during RFKILL, we're not trying to tx).
*/
iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16));
}
/* NOTE(review): fragment - tail of the gen2 TX enqueue path; the signature
 * and the earlier TFD-build/skb bookkeeping are above this chunk.
 */
/* Set up entry for this TFD in Tx byte-count array */
iwl_pcie_gen2_update_byte_tbl(trans, txq, cmd_len,
iwl_txq_gen2_get_num_tbs(tfd));
/* start timer if queue currently empty */ if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
/* Tell device the write index *just past* this latest filled TFD */
txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
iwl_txq_inc_wr_ptr(trans, txq); /* * At this point the frame is "transmitted" successfully * and we will get a TX status notification eventually.
*/
spin_unlock(&txq->lock); return 0;
}
/* NOTE(review): fragment - tail of a queue-memory free helper; the signature
 * and the earlier frees (TFD ring, first-TB buffers) are above this chunk.
 */
kfree(txq->entries); if (txq->bc_tbl.addr)
/* byte-count table comes from a DMA pool, return it there */
dma_pool_free(trans_pcie->txqs.bc_pool,
txq->bc_tbl.addr, txq->bc_tbl.dma);
kfree(txq);
}
/* * iwl_pcie_txq_free - Deallocate DMA queue. * @txq: Transmit queue to deallocate. * * Empty queue by removing and destroying all BD's. * Free all buffers. * 0-fill, but do not free "txq" descriptor structure.
*/ staticvoid iwl_txq_gen2_free(struct iwl_trans *trans, int txq_id)
{ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_txq *txq; int i;
/* NOTE(review): fragment - the function is cut after the timer delete;
 * the actual memory free and clearing of txqs.txq[txq_id] are not
 * visible in this chunk. 'staticvoid' is presumably 'static void'.
 */
if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES, "queue %d out of range", txq_id)) return;
txq = trans_pcie->txqs.txq[txq_id];
if (WARN_ON(!txq)) return;
iwl_txq_gen2_unmap(trans, txq_id);
/* De-alloc array of command/tx buffers */ if (txq_id == trans->conf.cmd_queue) for (i = 0; i < txq->n_window; i++) {
kfree_sensitive(txq->entries[i].cmd);
kfree_sensitive(txq->entries[i].free_buf);
}
timer_delete_sync(&txq->stuck_timer);
/* NOTE(review): fragment - interior of the dynamic queue allocation path;
 * the signature and declarations of 'size', 'timeout', 'sta_mask', 'tid',
 * 'txq' are outside this chunk.
 */
/* take the min with bytecount table entries allowed */
size = min_t(u32, size, trans_pcie->txqs.bc_tbl_size / sizeof(u16)); /* but must be power of 2 values for calculating read/write pointers */
size = rounddown_pow_of_two(size);
/* BZ A-step uses a fixed 4096-entry queue, no retry loop */
if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_BZ &&
trans->info.hw_rev_step == SILICON_A_STEP) {
size = 4096;
txq = iwl_txq_dyn_alloc_dma(trans, size, timeout);
} else { do {
txq = iwl_txq_dyn_alloc_dma(trans, size, timeout); if (!IS_ERR(txq)) break;
IWL_DEBUG_TX_QUEUES(trans, "Failed allocating TXQ of size %d for sta mask %x tid %d, ret: %ld\n",
size, sta_mask, tid,
PTR_ERR(txq));
/* halve and retry until we hit the 16-entry minimum */
size /= 2;
} while (size >= 16);
}
/* NOTE(review): this span appears to fuse the tails of TWO functions - a
 * per-queue free/disable helper (queue-range check and queue_used bit
 * clearing) and an all-queues free loop - with the code between them
 * missing from this chunk. Confirm against the full source.
 */
if (WARN(queue >= IWL_MAX_TVQM_QUEUES, "queue %d out of range", queue)) return;
/* * Upon HW Rfkill - we stop the device, and then stop the queues * in the op_mode. Just for the sake of the simplicity of the op_mode, * allow the op_mode to call txq_disable after it already called * stop_device.
*/ if (!test_and_clear_bit(queue, trans_pcie->txqs.queue_used)) {
WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status), "queue %d not used", queue); return;
}
/* Free all TX queues */ for (i = 0; i < ARRAY_SIZE(trans_pcie->txqs.txq); i++) { if (!trans_pcie->txqs.txq[i]) continue;
iwl_txq_gen2_free(trans, i);
}
}
/* NOTE(review): fragment - the function jumps to 'error' but that label,
 * the queue init call, and the return are not visible in this chunk.
 */
int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size)
{ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_txq *queue; int ret;
/* alloc and init the tx queue */ if (!trans_pcie->txqs.txq[txq_id]) {
queue = kzalloc(sizeof(*queue), GFP_KERNEL); if (!queue) {
IWL_ERR(trans, "Not enough memory for tx queue\n"); return -ENOMEM;
}
trans_pcie->txqs.txq[txq_id] = queue;
ret = iwl_pcie_txq_alloc(trans, queue, queue_size, true); if (ret) {
IWL_ERR(trans, "Tx %d queue init failed\n", txq_id); goto error;
}
} else {
/* queue already exists, reuse it */
queue = trans_pcie->txqs.txq[txq_id];
}
/* NOTE(review): fragment - interior of the host-command enqueue path; the
 * signature, the per-chunk loop header, the copy phase, and the TFD/DMA
 * setup are outside this chunk. 'elseif' is presumably 'else if' -
 * extraction artifact.
 */
if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
had_nocopy = true; if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
/* NOCOPY and DUP on the same chunk is invalid */
idx = -EINVAL; goto free_dup_buf;
}
} elseif (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) { /* * This is also a chunk that isn't copied * to the static buffer so set had_nocopy.
*/
had_nocopy = true;
/* only allowed once */ if (WARN_ON(dup_buf)) {
idx = -EINVAL; goto free_dup_buf;
}
dup_buf = kmemdup(cmddata[i], cmdlen[i],
GFP_ATOMIC); if (!dup_buf) return -ENOMEM;
} else { /* NOCOPY must not be followed by normal! */ if (WARN_ON(had_nocopy)) {
idx = -EINVAL; goto free_dup_buf;
}
copy_size += cmdlen[i];
}
cmd_size += cmd->len[i];
}
/* * If any of the command structures end up being larger than the * TFD_MAX_PAYLOAD_SIZE and they aren't dynamically allocated into * separate TFDs, then we will need to increase the size of the buffers
*/ if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE, "Command %s (%#x) is too large (%d bytes)\n",
iwl_get_cmd_string(trans, cmd->id), cmd->id, copy_size)) {
idx = -EINVAL; goto free_dup_buf;
}
/* re-initialize, this also marks the SG list as unused */
memset(out_meta, 0, sizeof(*out_meta)); if (cmd->flags & CMD_WANT_SKB)
out_meta->source = cmd;
/* set up the header */
out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id);
out_cmd->hdr_wide.group_id = group_id;
out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id);
out_cmd->hdr_wide.length =
cpu_to_le16(cmd_size - sizeof(struct iwl_cmd_header_wide));
out_cmd->hdr_wide.reserved = 0;
out_cmd->hdr_wide.sequence =
cpu_to_le16(QUEUE_TO_SEQ(trans->conf.cmd_queue) |
INDEX_TO_SEQ(txq->write_ptr));
/* * Otherwise we need at least IWL_FIRST_TB_SIZE copied * in total (for bi-directional DMA), but copy up to what * we can fit into the payload for debug dump purposes.
*/
copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.