/* Mac80211-queue to b43legacy-ring mapping */ staticstruct b43legacy_dmaring *priority_to_txring( struct b43legacy_wldev *dev, int queue_priority)
{ struct b43legacy_dmaring *ring;
/*FIXME: For now we always run on TX-ring-1 */ return dev->dma.tx_ring1;
/* 0 = highest priority */ switch (queue_priority) { default:
B43legacy_WARN_ON(1);
fallthrough; case 0:
ring = dev->dma.tx_ring3; break; case 1:
ring = dev->dma.tx_ring2; break; case 2:
ring = dev->dma.tx_ring1; break; case 3:
ring = dev->dma.tx_ring0; break; case 4:
ring = dev->dma.tx_ring4; break; case 5:
ring = dev->dma.tx_ring5; break;
}
/* Free the skb attached to a descriptor's metadata, if any.
 *
 * @ring: the DMA ring the descriptor belongs to (not referenced in the
 *        body; kept for symmetry with the other descriptor helpers)
 * @meta: per-descriptor metadata holding the skb pointer
 * @irq_context: nonzero when called from IRQ context, which requires
 *               dev_kfree_skb_irq() instead of dev_kfree_skb()
 *
 * Fix: the original text had "staticinline" fused into one token,
 * which does not compile.
 */
static inline void free_descriptor_buffer(struct b43legacy_dmaring *ring,
					  struct b43legacy_dmadesc_meta *meta,
					  int irq_context)
{
	if (meta->skb) {
		if (irq_context)
			dev_kfree_skb_irq(meta->skb);
		else
			dev_kfree_skb(meta->skb);
		/* Clear the pointer to guard against a double free. */
		meta->skb = NULL;
	}
}
/* Allocate the coherent DMA memory that backs the descriptor ring.
 *
 * @ring: the ring whose descbase/dmabase fields are filled in
 *
 * Returns 0 on success or -ENOMEM on allocation failure.
 * Fix: the original text had "staticint" fused into one token and was
 * truncated before the final "return 0;" and the closing brace.
 */
static int alloc_ringmemory(struct b43legacy_dmaring *ring)
{
	/* GFP flags must match the flags in free_ringmemory()! */
	ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
					    B43legacy_DMA_RINGMEMSIZE,
					    &(ring->dmabase), GFP_KERNEL);
	if (!ring->descbase)
		return -ENOMEM;

	return 0;
}
/* Allocate the initial descbuffers. * This is used for an RX ring only.
 *
 * NOTE(review): this block is garbled — "staticint" is a fused token, and
 * the body of the err_unwind loop (which should recycle the buffers already
 * set up) plus the trailing "goto out;" and the function's closing brace
 * appear to be truncated.  Compare against the upstream driver before
 * building.
*/ staticint alloc_initial_descbuffers(struct b43legacy_dmaring *ring)
{ int i; int err = -ENOMEM; struct b43legacy_dmadesc32 *desc; struct b43legacy_dmadesc_meta *meta;
/* Attach one freshly allocated RX buffer to every slot of the ring. */
for (i = 0; i < ring->nr_slots; i++) {
desc = op32_idx2desc(ring, i, &meta);
err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL); if (err) {
b43legacyerr(ring->dev->wl, "Failed to allocate initial descbuffers\n"); goto err_unwind;
}
}
/* Ensure all buffer setup is visible before publishing the slot count. */
mb(); /* all descbuffer setup before next line */
ring->used_slots = ring->nr_slots;
err = 0;
out: return err;
/* Error unwind: walk back over the slots that were already set up. */
err_unwind: for (i--; i >= 0; i--) {
desc = op32_idx2desc(ring, i, &meta);
/* Do initial setup of the DMA controller. * Reset the controller, write the ring busaddress * and switch the "enable" bit on.
 *
 * NOTE(review): this block is garbled — "staticint" is a fused token, and
 * the tail of the RX branch (programming of the RX control/ring registers),
 * the "out:" label, the final "return err;" and the closing brace appear to
 * be truncated.  Compare against the upstream driver before building.
*/ staticint dmacontroller_setup(struct b43legacy_dmaring *ring)
{ int err = 0;
u32 value;
u32 addrext;
u32 trans = ring->dev->dma.translation;
u32 ringbase = (u32)(ring->dmabase);
/* TX rings can be programmed directly; RX rings first need their
 * initial descriptor buffers allocated. */
if (ring->tx) {
/* The address-extension bits live above the translation mask. */
addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
>> SSB_DMA_TRANSLATION_SHIFT;
value = B43legacy_DMA32_TXENABLE;
value |= (addrext << B43legacy_DMA32_TXADDREXT_SHIFT)
& B43legacy_DMA32_TXADDREXT_MASK;
b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL, value);
b43legacy_dma_write(ring, B43legacy_DMA32_TXRING,
(ringbase & ~SSB_DMA_TRANSLATION_MASK)
| trans);
} else {
err = alloc_initial_descbuffers(ring); if (err) goto out;
/* NOTE(review): this block is truncated — everything after the
 * dma_map_single() call (checking the test mapping, allocating the ring
 * memory, controller setup, the error-unwind labels and the final return)
 * is missing from this chunk.  Compare against the upstream driver. */
/* Main initialization function. */ static struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev, int controller_index, int for_tx, enum b43legacy_dmatype type)
{ struct b43legacy_dmaring *ring; int err; int nr_slots;
dma_addr_t dma_test;
ring = kzalloc(sizeof(*ring), GFP_KERNEL); if (!ring) goto out;
ring->type = type;
ring->dev = dev;
/* TX rings use a different slot count than RX rings. */
nr_slots = B43legacy_RXRING_SLOTS; if (for_tx)
nr_slots = B43legacy_TXRING_SLOTS;
ring->meta = kcalloc(nr_slots, sizeof(struct b43legacy_dmadesc_meta),
GFP_KERNEL); if (!ring->meta) goto err_kfree_ring; if (for_tx) {
/* Per-slot cache for the firmware TX headers. */
ring->txhdr_cache = kcalloc(nr_slots, sizeof(struct b43legacy_txhdr_fw3),
GFP_KERNEL); if (!ring->txhdr_cache) goto err_kfree_meta;
/* test for ability to dma to txhdr_cache */
dma_test = dma_map_single(dev->dev->dma_dev, ring->txhdr_cache, sizeof(struct b43legacy_txhdr_fw3),
DMA_TO_DEVICE);
/* NOTE(review): this block is garbled — "staticvoid" and "unsignedint" are
 * fused tokens, and the tail of the function (freeing txhdr_cache, meta and
 * the ring structure itself, plus the closing brace) appears to be
 * truncated.  Compare against the upstream driver before building. */
/* Main cleanup function. */ staticvoid b43legacy_destroy_dmaring(struct b43legacy_dmaring *ring)
{ if (!ring) return;
b43legacydbg(ring->dev->wl, "DMA-%u 0x%04X (%s) max used slots:" " %d/%d\n", (unsignedint)(ring->type), ring->mmio_base,
(ring->tx) ? "TX" : "RX", ring->max_used_slots,
ring->nr_slots); /* Device IRQs are disabled prior entering this function, * so no need to take care of concurrency with rx handler stuff.
 */
dmacontroller_cleanup(ring);
free_all_descbuffers(ring);
free_ringmemory(ring);
/* NOTE(review): this block is garbled — the "#ifdef" directive is fused
 * into the middle of a source line (preprocessor directives must start a
 * line), and the function is truncated after the translation assignment:
 * the per-ring setup calls, error unwinding and final return are missing
 * from this chunk.  Compare against the upstream driver before building. */
int b43legacy_dma_init(struct b43legacy_wldev *dev)
{ struct b43legacy_dma *dma = &dev->dma; struct b43legacy_dmaring *ring; enum b43legacy_dmatype type = b43legacy_engine_type(dev); int err;
err = dma_set_mask_and_coherent(dev->dev->dma_dev, DMA_BIT_MASK(type)); if (err) { #ifdef CONFIG_B43LEGACY_PIO
b43legacywarn(dev->wl, "DMA for this device not supported. " "Falling back to PIO\n");
dev->__using_pio = true; return -EAGAIN; #else
b43legacyerr(dev->wl, "DMA for this device not supported and " "no PIO support compiled in\n"); return -EOPNOTSUPP; #endif
}
dma->translation = ssb_dma_translation(dev->dev);
/* Generate a cookie for the TX header.
 *
 * The upper 4 bits of the cookie carry the DMA controller (ring) ID
 * and the lower 12 bits carry the slot number.  The cookie must never
 * be 0, as 0 is a special value used in the RX path.
 */
static u16 generate_cookie(struct b43legacy_dmaring *ring,
			   int slot)
{
	/* Controller-ID bases for rings 0..5.  Any other ring index
	 * keeps the fallback value 0x1000, exactly as the original
	 * switch statement (which had no case for it) did. */
	static const u16 ring_base[] = {
		0xA000, 0xB000, 0xC000, 0xD000, 0xE000, 0xF000,
	};
	u16 cookie = 0x1000;

	if (ring->index >= 0 &&
	    ring->index < (int)(sizeof(ring_base) / sizeof(ring_base[0])))
		cookie = ring_base[ring->index];

	/* The slot number must fit into the lower 12 bits. */
	B43legacy_WARN_ON(!(((u16)slot & 0xF000) == 0x0000));
	cookie |= (u16)slot;

	return cookie;
}
/* Inspect a cookie and find out to which controller/slot it belongs.
 *
 * @dev: the wireless device
 * @cookie: cookie value taken from a TX status report
 * @slot: out-parameter receiving the decoded slot number
 *
 * Returns the TX ring the cookie refers to, or NULL for an unknown
 * controller ID (which triggers a warning).
 * Fix: the original text was truncated before the final "return ring;"
 * and the closing brace.
 */
static struct b43legacy_dmaring *parse_cookie(struct b43legacy_wldev *dev,
					      u16 cookie, int *slot)
{
	struct b43legacy_dma *dma = &dev->dma;
	struct b43legacy_dmaring *ring = NULL;

	/* The upper 4 bits are the controller ID (see generate_cookie). */
	switch (cookie & 0xF000) {
	case 0xA000:
		ring = dma->tx_ring0;
		break;
	case 0xB000:
		ring = dma->tx_ring1;
		break;
	case 0xC000:
		ring = dma->tx_ring2;
		break;
	case 0xD000:
		ring = dma->tx_ring3;
		break;
	case 0xE000:
		ring = dma->tx_ring4;
		break;
	case 0xF000:
		ring = dma->tx_ring5;
		break;
	default:
		B43legacy_WARN_ON(1);
	}
	/* The lower 12 bits carry the slot number. */
	*slot = (cookie & 0x0FFF);
	B43legacy_WARN_ON(!(ring && *slot >= 0 && *slot < ring->nr_slots));

	return ring;
}
/* NOTE(review): this block is garbled — "staticinline" and "unsignedlong"
 * are fused tokens, the "#ifdef" directive is fused into the middle of a
 * source line, and the rest of the function body (the overflow-injection
 * bookkeeping, the "#endif" and the return path) is truncated.  Compare
 * against the upstream driver before building. */
staticinline int should_inject_overflow(struct b43legacy_dmaring *ring)
{ #ifdef CONFIG_B43LEGACY_DEBUG if (unlikely(b43legacy_debug(ring->dev,
B43legacy_DBG_DMAOVERFLOW))) { /* Check if we should inject another ringbuffer overflow
 * to test handling of this situation in the stack. */ unsignedlong next_overflow;
/* Queue one frame for transmission on the matching DMA TX ring.
 *
 * @dev: the wireless device
 * @skb: the frame handed down by mac80211
 *
 * Returns 0 on success (including a deliberate drop for a vanished
 * encryption key), -ENOSPC when the ring cannot take the frame, or the
 * error from dma_tx_fragment() on a mapping failure.
 */
int b43legacy_dma_tx(struct b43legacy_wldev *dev, struct sk_buff *skb)
{
	struct b43legacy_dmaring *ring;
	int ret = 0;

	ring = priority_to_txring(dev, skb_get_queue_mapping(skb));
	B43legacy_WARN_ON(!ring->tx);

	if (unlikely(ring->stopped)) {
		/* We get here only because of a bug in mac80211.
		 * Because of a race, one packet may be queued after
		 * the queue is stopped, thus we got called when we
		 * shouldn't.  For now, just refuse the transmit. */
		if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
			b43legacyerr(dev->wl, "Packet after queue stopped\n");
		return -ENOSPC;
	}

	if (WARN_ON(free_slots(ring) < SLOTS_PER_PACKET)) {
		/* If we get here, we have a real error with the queue
		 * full, but queues not stopped. */
		b43legacyerr(dev->wl, "DMA queue overflow\n");
		return -ENOSPC;
	}

	/* dma_tx_fragment might reallocate the skb, so invalidate pointers
	 * pointing into the skb data or cb now. */
	ret = dma_tx_fragment(ring, &skb);
	if (unlikely(ret == -ENOKEY)) {
		/* Drop this packet, as we don't have the encryption key
		 * anymore and must not transmit it unencrypted. */
		dev_kfree_skb_any(skb);
		return 0;
	}
	if (unlikely(ret)) {
		b43legacyerr(dev->wl, "DMA tx mapping failure\n");
		return ret;
	}

	if ((free_slots(ring) < SLOTS_PER_PACKET) ||
	    should_inject_overflow(ring)) {
		/* This TX ring is full. */
		unsigned int queue_idx = skb_get_queue_mapping(skb);

		ieee80211_stop_queue(dev->wl->hw, queue_idx);
		dev->wl->tx_queue_stopped[queue_idx] = 1;
		ring->stopped = true;
		if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
			b43legacydbg(dev->wl, "Stopped TX ring %d\n",
				     ring->index);
	}

	return ret;
}
/* Handle a firmware TX status report for one transmitted frame.
 *
 * NOTE(review): this block is garbled — "conststruct" is a fused token,
 * and the descriptor-walk loop that assigns "meta" (via op32_idx2desc()
 * per slot, unmapping each buffer) appears to be truncated: "meta" is read
 * below without ever being assigned.  Several closing braces of the nested
 * retry-accounting branches also look truncated.  Compare against the
 * upstream driver before building.
 */
void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev, conststruct b43legacy_txstatus *status)
{ struct b43legacy_dmaring *ring; struct b43legacy_dmadesc_meta *meta; int retry_limit; int slot; int firstused;
ring = parse_cookie(dev, status->cookie, &slot); if (unlikely(!ring)) return;
B43legacy_WARN_ON(!ring->tx);
/* Sanity check: TX packets are processed in-order on one ring. * Check if the slot deduced from the cookie really is the first
 * used slot. */
firstused = ring->current_slot - ring->used_slots + 1; if (firstused < 0)
firstused = ring->nr_slots + firstused; if (unlikely(slot != firstused)) { /* This possibly is a firmware bug and will result in * malfunction, memory leaks and/or stall of DMA functionality.
 */
b43legacydbg(dev->wl, "Out of order TX status report on DMA " "ring %d. Expected %d, but got %d\n",
ring->index, firstused, slot); return;
}
/* NOTE(review): "meta" is used uninitialized from here on — the
 * slot-walk loop that assigns it is missing in this chunk. */
if (meta->is_last_fragment) { struct ieee80211_tx_info *info;
BUG_ON(!meta->skb);
info = IEEE80211_SKB_CB(meta->skb);
/* preserve the configured retry limit before clearing the status * The xmit function has overwritten the rc's value with the actual
 * retry limit done by the hardware */
retry_limit = info->status.rates[0].count;
ieee80211_tx_info_clear_status(info);
if (status->acked)
info->flags |= IEEE80211_TX_STAT_ACK;
if (status->rts_count > dev->wl->hw->conf.short_frame_max_tx_count) { /* * If the short retries (RTS, not data frame) have exceeded * the limit, the hw will not have tried the selected rate, * but will have used the fallback rate instead. * Don't let the rate control count attempts for the selected * rate in this case, otherwise the statistics will be off.
 */
info->status.rates[0].count = 0;
info->status.rates[1].count = status->frame_count;
} else { if (status->frame_count > retry_limit) {
info->status.rates[0].count = retry_limit;
info->status.rates[1].count = status->frame_count -
retry_limit;
/* NOTE(review): the closing braces of the retry-accounting
 * branches above appear to be truncated here. */
/* Call back to inform the ieee80211 subsystem about the * status of the transmission. * Some fields of txstat are already filled in dma_tx().
 */
ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb); /* skb is freed by ieee80211_tx_status_irqsafe() */
meta->skb = NULL;
} else { /* No need to call free_descriptor_buffer here, as * this is only the txhdr, which is not allocated.
 */
B43legacy_WARN_ON(meta->skb != NULL);
}
/* Everything unmapped and free'd. So it's not used anymore. */
ring->used_slots--;
if (dev->wl->tx_queue_stopped[ring->queue_prio]) {
dev->wl->tx_queue_stopped[ring->queue_prio] = 0;
} else { /* If the driver queue is running wake the corresponding
 * mac80211 queue. */
ieee80211_wake_queue(dev->wl->hw, ring->queue_prio); if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
b43legacydbg(dev->wl, "Woke up TX ring %d\n",
ring->index);
} /* Add work to the queue. */
ieee80211_queue_work(dev->wl->hw, &dev->wl->tx_work);
}
/* NOTE(review): the lines below are the interior of the RX-processing
 * path — the enclosing function's head (its signature and the local
 * declarations for skb, len, desc, rxhdr and slot used here) is missing
 * from this chunk, as is whatever follows the final brace.  Compare
 * against the upstream driver before building. */
if (ring->index == 3) { /* We received an xmit status. */ struct b43legacy_hwtxstatus *hw =
(struct b43legacy_hwtxstatus *)skb->data; int i = 0;
/* Poll until the firmware has filled in the cookie (bounded spin). */
while (hw->cookie == 0) { if (i > 100) break;
i++;
udelay(2);
barrier();
}
b43legacy_handle_hwtxstatus(ring->dev, hw); /* recycle the descriptor buffer. */
sync_descbuffer_for_device(ring, meta->dmaaddr,
ring->rx_buffersize);
return;
}
rxhdr = (struct b43legacy_rxhdr_fw3 *)skb->data;
/* A zero frame length means the header DMA has not landed yet;
 * re-read it a few times before giving up. */
len = le16_to_cpu(rxhdr->frame_len); if (len == 0) { int i = 0;
do {
udelay(2);
barrier();
len = le16_to_cpu(rxhdr->frame_len);
} while (len == 0 && i++ < 5); if (unlikely(len == 0)) { /* recycle the descriptor buffer. */
sync_descbuffer_for_device(ring, meta->dmaaddr,
ring->rx_buffersize); goto drop;
}
} if (unlikely(len > ring->rx_buffersize)) { /* The data did not fit into one descriptor buffer * and is split over multiple buffers. * This should never happen, as we try to allocate buffers * big enough. So simply ignore this packet.
 */ int cnt = 0;
s32 tmp = len;
/* Recycle every buffer the oversized frame spilled into. */
while (1) {
desc = op32_idx2desc(ring, *slot, &meta); /* recycle the descriptor buffer. */
sync_descbuffer_for_device(ring, meta->dmaaddr,
ring->rx_buffersize);
*slot = next_slot(ring, *slot);
cnt++;
tmp -= ring->rx_buffersize; if (tmp <= 0) break;
}
b43legacyerr(ring->dev->wl, "DMA RX buffer too small " "(len: %u, buffer: %u, nr-dropped: %d)\n",
len, ring->rx_buffersize, cnt); goto drop;
}
/*
 * Stray non-code text (German website disclaimer) found appended to this
 * file — wrapped in a comment so the file is not syntactically broken:
 *
 * Die Informationen auf dieser Webseite wurden
 * nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
 * noch Qualität der bereitgestellten Informationen zugesichert.
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */