if (dev->hif2)
        hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);

mt7996_dma_disable(dev, true);
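
/* hif1_ofs is the fixed distance between the PCIe1 and PCIe0 copies
 * of the WFDMA register block; it is added to ring bases that live
 * behind the second PCIe interface (hif2). DMA is disabled and reset
 * before the rings are (re)configured.
 */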

/* init tx queue */
ret = mt7996_init_tx_queues(&dev->phy,
                            MT_TXQ_ID(dev->mphy.band_idx),
                            MT7996_TX_RING_SIZE,
                            MT_TXQ_RING_BASE(0),
                            wed);
if (ret)
        return ret;
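
/* Passing the wed handle here lets the band0 tx ring be claimed by
 * the WED offload engine when it is active.
 */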

/* command to WM */
ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM,
                          MT_MCUQ_ID(MT_MCUQ_WM),
                          MT7996_TX_MCU_RING_SIZE,
                          MT_MCUQ_RING_BASE(MT_MCUQ_WM));
if (ret)
        return ret;
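
/* WM is the main WiFi MCU; WA is a secondary MCU that, among other
 * things, reports tx free done events. Chips without a WA (such as
 * the mt7990 noted below) have no WA command queue.
 */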

/* command to WA */
if (mt7996_has_wa(dev)) {
        ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WA,
                                  MT_MCUQ_ID(MT_MCUQ_WA),
                                  MT7996_TX_MCU_RING_SIZE,
                                  MT_MCUQ_RING_BASE(MT_MCUQ_WA));
        if (ret)
                return ret;
}

/* firmware download */
ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_FWDL,
                          MT_MCUQ_ID(MT_MCUQ_FWDL),
                          MT7996_TX_FWDL_RING_SIZE,
                          MT_MCUQ_RING_BASE(MT_MCUQ_FWDL));
if (ret)
        return ret;

/* event from WM */
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU],
                       MT_RXQ_ID(MT_RXQ_MCU),
                       MT7996_RX_MCU_RING_SIZE,
                       MT7996_RX_MCU_BUF_SIZE,
                       MT_RXQ_RING_BASE(MT_RXQ_MCU));
if (ret)
        return ret;

/* event from WA, or SDO event for mt7990 */
ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU_WA],
                       MT_RXQ_ID(MT_RXQ_MCU_WA),
                       MT7996_RX_MCU_RING_SIZE_WA,
                       MT7996_RX_MCU_BUF_SIZE,
                       MT_RXQ_RING_BASE(MT_RXQ_MCU_WA));
if (ret)
        return ret;
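
/* For the rings below, queues to be owned by the WED (Wireless
 * Ethernet Dispatch) offload engine get their ->flags and ->wed
 * pointer set before mt76_queue_alloc(), so allocation can hand the
 * ring over to the hardware.
 */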

/* rx data queue for band0 and mt7996 band1 */
if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed)) {
        dev->mt76.q_rx[MT_RXQ_MAIN].flags = MT_WED_Q_RX(0);
        dev->mt76.q_rx[MT_RXQ_MAIN].wed = wed;
}

ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN],
                       MT_RXQ_ID(MT_RXQ_MAIN),
                       MT7996_RX_RING_SIZE,
                       MT_RX_BUF_SIZE,
                       MT_RXQ_RING_BASE(MT_RXQ_MAIN));
if (ret)
        return ret;

/* tx free notify event from WA for band0 */
if (mtk_wed_device_active(wed) && !dev->has_rro) {
        dev->mt76.q_rx[MT_RXQ_MAIN_WA].flags = MT_WED_Q_TXFREE;
        dev->mt76.q_rx[MT_RXQ_MAIN_WA].wed = wed;
}

if (mt7996_has_wa(dev)) {
        ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN_WA],
                               MT_RXQ_ID(MT_RXQ_MAIN_WA),
                               MT7996_RX_MCU_RING_SIZE,
                               MT_RX_BUF_SIZE,
                               MT_RXQ_RING_BASE(MT_RXQ_MAIN_WA));
        if (ret)
                return ret;
} else {
        if (mtk_wed_device_active(wed)) {
                dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].flags = MT_WED_Q_TXFREE;
                dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].wed = wed;
        }

        ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0],
                               MT_RXQ_ID(MT_RXQ_TXFREE_BAND0),
                               MT7996_RX_MCU_RING_SIZE,
                               MT7996_RX_BUF_SIZE,
                               MT_RXQ_RING_BASE(MT_RXQ_TXFREE_BAND0));
        if (ret)
                return ret;
}
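
/* Without a WA MCU, tx free done events arrive on dedicated per-band
 * TXFREE rings; a dual-hif device likewise needs one for its second
 * band.
 */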
if (!mt7996_has_wa(dev) && dev->hif2) {
        if (mtk_wed_device_active(wed)) {
                dev->mt76.q_rx[MT_RXQ_TXFREE_BAND1].flags = MT_WED_Q_TXFREE;
                dev->mt76.q_rx[MT_RXQ_TXFREE_BAND1].wed = wed;
        }

        ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_TXFREE_BAND1],
                               MT_RXQ_ID(MT_RXQ_TXFREE_BAND1),
                               MT7996_RX_MCU_RING_SIZE,
                               MT7996_RX_BUF_SIZE,
                               MT_RXQ_RING_BASE(MT_RXQ_TXFREE_BAND1));
        if (ret)
                return ret;
}

if (mt7996_band_valid(dev, MT_BAND2)) {
        /* rx data queue for mt7996 band2 */
        rx_base = MT_RXQ_RING_BASE(MT_RXQ_BAND2) + hif1_ofs;
        ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND2],
                               MT_RXQ_ID(MT_RXQ_BAND2),
                               MT7996_RX_RING_SIZE,
                               MT_RX_BUF_SIZE,
                               rx_base);
        if (ret)
                return ret;

        /* tx free notify event from WA for mt7996 band2
         * use pcie0's rx ring3, but redirect pcie0 rx ring3 interrupt to pcie1
         */
        if (mtk_wed_device_active(wed_hif2) && !dev->has_rro) {
                dev->mt76.q_rx[MT_RXQ_BAND2_WA].flags = MT_WED_Q_TXFREE;
                dev->mt76.q_rx[MT_RXQ_BAND2_WA].wed = wed_hif2;
        }

        ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND2_WA],
                               MT_RXQ_ID(MT_RXQ_BAND2_WA),
                               MT7996_RX_MCU_RING_SIZE,
                               MT_RX_BUF_SIZE,
                               MT_RXQ_RING_BASE(MT_RXQ_BAND2_WA));
        if (ret)
                return ret;
} else if (mt7996_band_valid(dev, MT_BAND1)) {
        /* rx data queue for mt7992 band1 */
        rx_base = MT_RXQ_RING_BASE(MT_RXQ_BAND1) + hif1_ofs;
        ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND1],
                               MT_RXQ_ID(MT_RXQ_BAND1),
                               MT7996_RX_RING_SIZE,
                               MT_RX_BUF_SIZE,
                               rx_base);
        if (ret)
                return ret;

        /* tx free notify event from WA for mt7992 band1 */
        if (mt7996_has_wa(dev)) {
                rx_base = MT_RXQ_RING_BASE(MT_RXQ_BAND1_WA) + hif1_ofs;
                ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND1_WA],
                                       MT_RXQ_ID(MT_RXQ_BAND1_WA),
                                       MT7996_RX_MCU_RING_SIZE,
                                       MT_RX_BUF_SIZE,
                                       rx_base);
                if (ret)
                        return ret;
        }
}
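
/* When WED is active with rx capability and the device supports
 * hardware rx reordering (RRO), extra hardware-managed rro data and
 * txfree rings are set up; MT_QFLAG_WED_RRO_EN marks them as such.
 */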
if (mtk_wed_device_active(wed) && mtk_wed_get_rx_capa(wed) &&
    dev->has_rro) {
        /* rx rro data queue for band0 */
        dev->mt76.q_rx[MT_RXQ_RRO_BAND0].flags =
                MT_WED_RRO_Q_DATA(0) | MT_QFLAG_WED_RRO_EN;
        dev->mt76.q_rx[MT_RXQ_RRO_BAND0].wed = wed;
        ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_RRO_BAND0],
                               MT_RXQ_ID(MT_RXQ_RRO_BAND0),
                               MT7996_RX_RING_SIZE,
                               MT7996_RX_BUF_SIZE,
                               MT_RXQ_RING_BASE(MT_RXQ_RRO_BAND0));
        if (ret)
                return ret;

        /* tx free notify event from WA for band0 */
        dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].flags = MT_WED_Q_TXFREE;
        dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0].wed = wed;
        ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_TXFREE_BAND0],
                               MT_RXQ_ID(MT_RXQ_TXFREE_BAND0),
                               MT7996_RX_MCU_RING_SIZE,
                               MT7996_RX_BUF_SIZE,
                               MT_RXQ_RING_BASE(MT_RXQ_TXFREE_BAND0));
        if (ret)
                return ret;

        if (mt7996_band_valid(dev, MT_BAND2)) {
                /* rx rro data queue for band2 */
                dev->mt76.q_rx[MT_RXQ_RRO_BAND2].flags =
                        MT_WED_RRO_Q_DATA(1) | MT_QFLAG_WED_RRO_EN;
                dev->mt76.q_rx[MT_RXQ_RRO_BAND2].wed = wed;
                ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_RRO_BAND2],
                                       MT_RXQ_ID(MT_RXQ_RRO_BAND2),
                                       MT7996_RX_RING_SIZE,
                                       MT7996_RX_BUF_SIZE,
                                       MT_RXQ_RING_BASE(MT_RXQ_RRO_BAND2) + hif1_ofs);
                if (ret)
                        return ret;

                /* tx free notify event from MAC for band2 */
                if (mtk_wed_device_active(wed_hif2)) {
                        dev->mt76.q_rx[MT_RXQ_TXFREE_BAND2].flags = MT_WED_Q_TXFREE;
                        dev->mt76.q_rx[MT_RXQ_TXFREE_BAND2].wed = wed_hif2;
                }

                ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_TXFREE_BAND2],
                                       MT_RXQ_ID(MT_RXQ_TXFREE_BAND2),
                                       MT7996_RX_MCU_RING_SIZE,
                                       MT7996_RX_BUF_SIZE,
                                       MT_RXQ_RING_BASE(MT_RXQ_TXFREE_BAND2) + hif1_ofs);
                if (ret)
                        return ret;
        }
}
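
/* Fill the rx rings and register the common mt76 rx NAPI poll
 * handler for all queues allocated above.
 */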
ret = mt76_init_queues(dev, mt76_dma_rx_poll);
if (ret < 0)
        return ret;
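
/* Reset path: stop host DMA on the hif2 copy of the WFDMA block,
 * then drain and reset all tx, MCU and rx queues.
 */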
if (dev->hif2)
        mt76_clear(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
                   MT_WFDMA0_GLO_CFG_TX_DMA_EN |
                   MT_WFDMA0_GLO_CFG_RX_DMA_EN);

usleep_range(1000, 2000);
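
/* Flush any pending entries from every band's tx queues; phy2/phy3
 * exist only on multi-band devices.
 */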
for (i = 0; i < __MT_TXQ_MAX; i++) {
        mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);
        if (phy2)
                mt76_queue_tx_cleanup(dev, phy2->q_tx[i], true);
        if (phy3)
                mt76_queue_tx_cleanup(dev, phy3->q_tx[i], true);
}

for (i = 0; i < __MT_MCUQ_MAX; i++)
        mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true);

/* reset hw queues */
for (i = 0; i < __MT_TXQ_MAX; i++) {
        mt76_dma_reset_tx_queue(&dev->mt76, dev->mphy.q_tx[i]);
        if (phy2)
                mt76_dma_reset_tx_queue(&dev->mt76, phy2->q_tx[i]);
        if (phy3)
                mt76_dma_reset_tx_queue(&dev->mt76, phy3->q_tx[i]);
}

for (i = 0; i < __MT_MCUQ_MAX; i++)
        mt76_queue_reset(dev, dev->mt76.q_mcu[i]);
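
/* Rx rings owned by the WED engine (RRO data and txfree) are managed
 * by the hardware offload and are skipped here.
 */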
mt76_for_each_q_rx(&dev->mt76, i) {
        if (mtk_wed_device_active(&dev->mt76.mmio.wed))
                if (mt76_queue_is_wed_rro(&dev->mt76.q_rx[i]) ||
                    mt76_queue_is_wed_tx_free(&dev->mt76.q_rx[i]))
                        continue;

        mt76_queue_reset(dev, &dev->mt76.q_rx[i]);
}