if (fw_mon->size) {
memset(fw_mon->block, 0, fw_mon->size); return;
}
/* need at least 2 KiB, so stop at 11 */ for (power = max_power; power >= 11; power--) {
size = BIT(power);
block = dma_alloc_coherent(trans->dev, size, &physical,
GFP_KERNEL | __GFP_NOWARN); if (!block) continue;
IWL_INFO(trans, "Allocated 0x%08x bytes for firmware monitor.\n",
size); break;
}
if (WARN_ON_ONCE(!block)) return;
if (power != max_power)
IWL_ERR(trans, "Sorry - debug buffer is only %luK while you requested %luK\n",
(unsignedlong)BIT(power - 10),
(unsignedlong)BIT(max_power - 10));
/* * L0S states have been found to be unstable with our devices * and in newer hardware they are not officially supported at * all, so we must always set the L0S_DISABLED bit.
*/
iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_DISABLED);
/*
 * iwl_pcie_apm_init - start up the NIC's basic functionality after reset
 *
 * Called e.g. after platform boot, or shutdown via iwl_pcie_apm_stop().
 * NOTE: This does not load uCode nor start the embedded processor.
 *
 * Fix: the original text had the fused token "staticint" (extraction
 * damage) which does not compile; formatting restored to kernel style.
 *
 * Return: 0 on success, or the error from iwl_finish_nic_init().
 */
static int iwl_pcie_apm_init(struct iwl_trans *trans)
{
	int ret;

	/*
	 * Disable L0s without affecting L1;
	 * don't wait for ICH L0s (ICH bug W/A)
	 */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_HAP_WAKE);

	iwl_pcie_apm_config(trans);

	/* Configure analog phase-lock-loop before activating to D0A */
	if (trans->mac_cfg->base->pll_cfg)
		iwl_set_bit(trans, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL);

	ret = iwl_finish_nic_init(trans);
	if (ret)
		return ret;

	if (trans->cfg->host_interrupt_operation_mode) {
		/*
		 * This is a bit of an abuse - This is needed for 7260 / 3160
		 * only check host_interrupt_operation_mode even if this is
		 * not related to host_interrupt_operation_mode.
		 *
		 * Enable the oscillator to count wake up time for L1 exit. This
		 * consumes slightly more power (100uA) - but allows to be sure
		 * that we wake up from L1 on time.
		 *
		 * This looks weird: read twice the same register, discard the
		 * value, set a bit, and yet again, read that same register
		 * just to discard the value. But that's the way the hardware
		 * seems to like it.
		 */
		iwl_read_prph(trans, OSC_CLK);
		iwl_read_prph(trans, OSC_CLK);
		iwl_set_bits_prph(trans, OSC_CLK, OSC_CLK_FORCE_CONTROL);
		iwl_read_prph(trans, OSC_CLK);
		iwl_read_prph(trans, OSC_CLK);
	}

	/*
	 * Enable DMA clock and wait for it to stabilize.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0"
	 * bits do not disable clocks.  This preserves any hardware
	 * bits already set by default in "CLK_CTRL_REG" after reset.
	 */
	if (!trans->mac_cfg->base->apmg_not_supported) {
		iwl_write_prph(trans, APMG_CLK_EN_REG,
			       APMG_CLK_VAL_DMA_CLK_RQT);
		udelay(20);

		/* Clear the interrupt in APMG if the NIC is in RFKILL */
		iwl_write_prph(trans, APMG_RTC_INT_STT_REG,
			       APMG_RTC_INT_STT_RFKILL);
	}

	set_bit(STATUS_DEVICE_ENABLED, &trans->status);

	return 0;
}
/* * Enable LP XTAL to avoid HW bug where device may consume much power if * FW is not loaded after device reset. LP XTAL is disabled by default * after device HW reset. Do it only if XTAL is fed by internal source. * Configure device's "persistence" mode to avoid resetting XTAL again when * SHRD_HW_RST occurs in S3.
 */ staticvoid iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
{ int ret;
u32 apmg_gp1_reg;
/*
 * NOTE(review): apmg_gp1_reg and dl_cfg_reg are declared but never used in
 * the code visible below -- the tail of this function (which presumably
 * saves/restores those registers) appears to have been lost in extraction.
 * Also "staticvoid" is a fused token that will not compile.  Reconstruct
 * against the upstream source; code left byte-identical here.
 */
u32 apmg_xtal_cfg_reg;
u32 dl_cfg_reg;
/* Force XTAL ON */
iwl_trans_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
ret = iwl_trans_pcie_sw_reset(trans, true);
if (!ret)
ret = iwl_finish_nic_init(trans);
if (WARN_ON(ret)) { /* Release XTAL ON request */
iwl_trans_clear_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_XTAL_ON); return;
}
/* * Clear "disable persistence" to avoid LP XTAL resetting when * SHRD_HW_RST is applied in S3.
 */
iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
APMG_PCIDEV_STT_VAL_PERSIST_DIS);
/* * Force APMG XTAL to be active to prevent its disabling by HW * caused by APMG idle state.
 */
apmg_xtal_cfg_reg = iwl_trans_pcie_read_shr(trans,
SHR_APMG_XTAL_CFG_REG);
iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
apmg_xtal_cfg_reg |
SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
ret = iwl_trans_pcie_sw_reset(trans, true); if (ret)
IWL_ERR(trans, "iwl_pcie_apm_lp_xtal_enable: failed to retake NIC ownership\n");
/*
 * NOTE(review): everything from here to the closing brace (the hw-ready
 * polling loop, csme_own handling, the undeclared counter 't', and the
 * 'return 0' / 'return -EBUSY' statements inside a void function) belongs
 * to a different routine -- the card-prepare path.  This is a splice
 * introduced by extraction and will not compile as-is; needs to be
 * re-split against the original source.
 */
/* If HW is not ready, prepare the conditions to check again */
iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_WAKE_ME);
do {
ret = iwl_pcie_set_hw_ready(trans); if (!ret) {
trans->csme_own = false; return 0;
}
if (iwl_mei_is_connected()) {
IWL_DEBUG_INFO(trans, "Couldn't prepare the card but SAP is connected\n");
trans->csme_own = true; if (trans->mac_cfg->device_family !=
IWL_DEVICE_FAMILY_9000)
IWL_ERR(trans, "SAP not supported for this NIC family\n");
return -EBUSY;
}
usleep_range(200, 1000);
t += 200;
} while (t < 150000);
msleep(25);
}
staticint iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans, conststruct fw_img *image, int cpu, int *first_ucode_section)
{ int shift_param; int i, ret = 0, sec_num = 0x1;
u32 val, last_read_idx = 0;
for (i = *first_ucode_section; i < image->num_sec; i++) {
last_read_idx = i;
/* * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between * CPU1 to CPU2. * PAGING_SEPARATOR_SECTION delimiter - separate between * CPU2 non paged to CPU2 paging sec.
*/ if (!image->sec[i].data ||
image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
IWL_DEBUG_FW(trans, "Break since Data not valid or Empty section, sec = %d\n",
i); break;
}
ret = iwl_pcie_load_section(trans, i, &image->sec[i]); if (ret) return ret;
/* Notify ucode of loaded section number and status */
val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS);
val = val | (sec_num << shift_param);
iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val);
sec_num = (sec_num << 1) | 0x1;
}
*first_ucode_section = last_read_idx;
iwl_enable_interrupts(trans);
if (trans->mac_cfg->gen2) { if (cpu == 1)
iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
0xFFFF); else
iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
0xFFFFFFFF);
} else { if (cpu == 1)
iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS,
0xFFFF); else
iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS,
0xFFFFFFFF);
}
return 0;
}
/*
 * Load ucode sections for one CPU (pre-8000 flow): CPU1 starts at section
 * 0, CPU2 continues after the separator section.
 *
 * NOTE(review): this block is mangled by extraction ("staticint",
 * "conststruct" are fused tokens) and is spliced: the function's tail
 * ("*first_ucode_section = last_read_idx; return 0; }") is missing, and
 * everything from the "dest->monitor_mode" check down (undeclared 'dest',
 * 'goto monitor' with no visible label) belongs to the debug-destination
 * apply routine.  Reconstruct against the upstream source; code left
 * byte-identical here.
 */
staticint iwl_pcie_load_cpu_sections(struct iwl_trans *trans, conststruct fw_img *image, int cpu, int *first_ucode_section)
{ int i, ret = 0;
u32 last_read_idx = 0;
if (cpu == 1)
*first_ucode_section = 0; else
(*first_ucode_section)++;
for (i = *first_ucode_section; i < image->num_sec; i++) {
last_read_idx = i;
/* * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between * CPU1 to CPU2. * PAGING_SEPARATOR_SECTION delimiter - separate between * CPU2 non paged to CPU2 paging sec.
 */ if (!image->sec[i].data ||
image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
IWL_DEBUG_FW(trans, "Break since Data not valid or Empty section, sec = %d\n",
i); break;
}
ret = iwl_pcie_load_section(trans, i, &image->sec[i]); if (ret) return ret;
}
/* NOTE(review): splice point -- 'dest' is not declared in this function */
if (dest->monitor_mode == EXTERNAL_MODE)
iwl_pcie_alloc_fw_monitor(trans, dest->size_power); else
IWL_WARN(trans, "PCI should have external buffer debug\n");
/* apply each debug-destination register operation from the fw TLVs */
for (i = 0; i < trans->dbg.n_dest_reg; i++) {
u32 addr = le32_to_cpu(dest->reg_ops[i].addr);
u32 val = le32_to_cpu(dest->reg_ops[i].val);
switch (dest->reg_ops[i].op) { case CSR_ASSIGN:
iwl_write32(trans, addr, val); break; case CSR_SETBIT:
iwl_set_bit(trans, addr, BIT(val)); break; case CSR_CLEARBIT:
iwl_clear_bit(trans, addr, BIT(val)); break; case PRPH_ASSIGN:
iwl_write_prph(trans, addr, val); break; case PRPH_SETBIT:
iwl_set_bits_prph(trans, addr, BIT(val)); break; case PRPH_CLEARBIT:
iwl_clear_bits_prph(trans, addr, BIT(val)); break; case PRPH_BLOCKBIT: if (iwl_read_prph(trans, addr) & BIT(val)) {
IWL_ERR(trans, "BIT(%u) in address 0x%x is 1, stopping FW configuration\n",
val, addr); goto monitor;
} break; default:
IWL_ERR(trans, "FW debug - unknown OP %d\n",
dest->reg_ops[i].op); break;
}
}
staticint iwl_pcie_load_given_ucode(struct iwl_trans *trans, conststruct fw_img *image)
{ int ret = 0; int first_ucode_section;
IWL_DEBUG_FW(trans, "working with %s CPU\n",
image->is_dual_cpus ? "Dual" : "Single");
/* load to FW the binary non secured sections of CPU1 */
ret = iwl_pcie_load_cpu_sections(trans, image, 1, &first_ucode_section); if (ret) return ret;
if (image->is_dual_cpus) { /* set CPU2 header address */
iwl_write_prph(trans,
LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
LMPM_SECURE_CPU2_HDR_MEM_SPACE);
/* load to FW the binary sections of CPU2 */
ret = iwl_pcie_load_cpu_sections(trans, image, 2,
&first_ucode_section); if (ret) return ret;
}
if (iwl_pcie_dbg_on(trans))
iwl_pcie_apply_destination(trans);
iwl_enable_interrupts(trans);
/* release CPU reset */
iwl_write32(trans, CSR_RESET, 0);
return 0;
}
staticint iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans, conststruct fw_img *image)
{ int ret = 0; int first_ucode_section;
IWL_DEBUG_FW(trans, "working with %s CPU\n",
image->is_dual_cpus ? "Dual" : "Single");
if (iwl_pcie_dbg_on(trans))
iwl_pcie_apply_destination(trans);
IWL_DEBUG_POWER(trans, "Original WFPM value = 0x%08X\n",
iwl_read_prph(trans, WFPM_GP2));
/* * Set default value. On resume reading the values that were * zeored can provide debug data on the resume flow. * This is for debugging only and has no functional impact.
*/
iwl_write_prph(trans, WFPM_GP2, 0x01010101);
/* configure the ucode to be ready to get the secured image */ /* release CPU reset */
iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);
/* load to FW the binary Secured sections of CPU1 */
ret = iwl_pcie_load_cpu_sections_8000(trans, image, 1,
&first_ucode_section); if (ret) return ret;
/* load to FW the binary sections of CPU2 */ return iwl_pcie_load_cpu_sections_8000(trans, image, 2,
&first_ucode_section);
}
staticvoid iwl_pcie_map_list(struct iwl_trans *trans, conststruct iwl_causes_list *causes, int arr_size, int val)
{ int i;
for (i = 0; i < arr_size; i++) {
iwl_write8(trans, CSR_MSIX_IVAR(causes[i].addr), val);
iwl_clear_bit(trans, causes[i].mask_reg,
BIT(causes[i].bit));
}
}
/*
 * iwl_pcie_map_non_rx_causes - bind all non-RX causes to the default irq
 *
 * Maps the common cause list plus the device-family-specific list (BZ vs
 * pre-BZ) to the default interrupt vector, marked non-auto-clear.
 *
 * Fix: fused token "staticvoid" (extraction damage) broke compilation;
 * formatting restored to kernel style.
 */
static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE;

	/*
	 * Access all non RX causes and map them to the default irq.
	 * In case we are missing at least one interrupt vector,
	 * the first interrupt vector will serve non-RX and FBQ causes.
	 */
	iwl_pcie_map_list(trans, causes_list_common,
			  ARRAY_SIZE(causes_list_common), val);
	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
		iwl_pcie_map_list(trans, causes_list_bz,
				  ARRAY_SIZE(causes_list_bz), val);
	else
		iwl_pcie_map_list(trans, causes_list_pre_bz,
				  ARRAY_SIZE(causes_list_pre_bz), val);
}
/*
 * NOTE(review): the span below is a splice of at least three function
 * fragments with their headers lost in extraction:
 *   1) an RX-cause-to-vector mapping loop (uses undeclared 'val', 'idx',
 *      'offset' -- tail of the rx-causes mapping routine);
 *   2) the MSI/MSI-X hardware configuration path (the
 *      'if (!trans_pcie->msix_enabled)' block through
 *      iwl_pcie_map_rx_causes());
 *   3) the interior of the stop-device path (disable interrupts, stop
 *      TX/RX, APM stop, re-ACK interrupts, clear status bits) followed by
 *      the tail of an irq-synchronize helper.
 * None of these compile standalone; reconstruct against the upstream
 * source.  Code left byte-identical.
 */
/* * The first RX queue - fallback queue, which is designated for * management frame, command responses etc, is always mapped to the * first interrupt vector. The other RX queues are mapped to * the other (N - 2) interrupt vectors.
 */
val = BIT(MSIX_FH_INT_CAUSES_Q(0)); for (idx = 1; idx < trans->info.num_rxqs; idx++) {
iwl_write8(trans, CSR_MSIX_RX_IVAR(idx),
MSIX_FH_INT_CAUSES_Q(idx - offset));
val |= BIT(MSIX_FH_INT_CAUSES_Q(idx));
}
iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~val);
val = MSIX_FH_INT_CAUSES_Q(0); if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX)
val |= MSIX_NON_AUTO_CLEAR_CAUSE;
iwl_write8(trans, CSR_MSIX_RX_IVAR(0), val);
if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS)
iwl_write8(trans, CSR_MSIX_RX_IVAR(1), val);
}
/* NOTE(review): splice point -- start of the MSI/MSI-X hw config fragment */
if (!trans_pcie->msix_enabled) { if (trans->mac_cfg->mq_rx_supported &&
test_bit(STATUS_DEVICE_ENABLED, &trans->status))
iwl_write_umac_prph(trans, UREG_CHICK,
UREG_CHICK_MSI_ENABLE); return;
} /* * The IVAR table needs to be configured again after reset, * but if the device is disabled, we can't write to * prph.
 */ if (test_bit(STATUS_DEVICE_ENABLED, &trans->status))
iwl_write_umac_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE);
/* * Each cause from the causes list above and the RX causes is * represented as a byte in the IVAR table. The first nibble * represents the bound interrupt vector of the cause, the second * represents no auto clear for this cause. This will be set if its * interrupt vector is bound to serve other causes.
 */
iwl_pcie_map_rx_causes(trans);
/* NOTE(review): splice point -- start of the stop-device fragment */
/* tell the device to stop sending interrupts */
iwl_disable_interrupts(trans);
/* device going down, Stop using ICT table */
iwl_pcie_disable_ict(trans);
/* * If a HW restart happens during firmware loading, * then the firmware loading might call this function * and later it might be called again due to the * restart. So don't process again if the device is * already dead.
 */ if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
IWL_DEBUG_INFO(trans, "DEVICE_ENABLED bit was set and is now cleared\n"); if (!from_irq)
iwl_pcie_synchronize_irqs(trans);
iwl_pcie_rx_napi_sync(trans);
iwl_pcie_tx_stop(trans);
iwl_pcie_rx_stop(trans);
/* Make sure (redundant) we've released our request to stay awake */ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
iwl_clear_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ); else
iwl_clear_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
/* Stop the device, and put it in low power state */
iwl_pcie_apm_stop(trans, false);
/* re-take ownership to prevent other users from stealing the device */
iwl_trans_pcie_sw_reset(trans, true);
/* * Upon stop, the IVAR table gets erased, so msi-x won't * work. This causes a bug in RF-KILL flows, since the interrupt * that enables radio won't fire on the correct irq, and the * driver won't be able to handle the interrupt. * Configure the IVAR table again after reset.
 */
iwl_pcie_conf_msix_hw(trans_pcie);
/* * Upon stop, the APM issues an interrupt if HW RF kill is set. * This is a bug in certain verions of the hardware. * Certain devices also keep sending HW RF kill interrupt all * the time, unless the interrupt is ACKed even if the interrupt * should be masked. Re-ACK all the interrupts here.
 */
iwl_disable_interrupts(trans);
/* clear all status bits */
clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
clear_bit(STATUS_INT_ENABLED, &trans->status);
clear_bit(STATUS_TPOWER_PMI, &trans->status);
/* * Even if we stop the HW, we still want the RF kill * interrupt
 */
iwl_enable_rfkill_int(trans);
}
/* NOTE(review): splice point -- tail of an irq-synchronize helper */
for (i = 0; i < trans_pcie->alloc_vecs; i++)
synchronize_irq(trans_pcie->msix_entries[i].vector);
} else {
synchronize_irq(trans_pcie->pci_dev->irq);
}
}
/*
 * NOTE(review): the span below splices together fragments of three
 * different routines whose headers were lost in extraction:
 *   1) the firmware-start path (card prepare, rfkill checks, nic init,
 *      ucode load, rfkill re-check -- uses undeclared 'trans_pcie',
 *      'hw_rfkill', 'run_in_rfkill', 'img', 'was_in_rfkill', 'ret');
 *   2) the D3 suspend/resume path (TX reset, nic init, MSI-X/ICT
 *      reconfiguration, D3 status and handshake);
 *   3) the tail of the interrupt-capability setup (num_irqs/max_irqs
 *      vector sharing policy; note the fused token "elseif").
 * None of these compile standalone; reconstruct against the upstream
 * source.  Code left byte-identical.
 */
/* This may fail if AMT took ownership of the device */ if (iwl_pcie_prepare_card_hw(trans)) {
IWL_WARN(trans, "Exit HW not ready\n"); return -EIO;
}
iwl_enable_rfkill_int(trans);
iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
/* * We enabled the RF-Kill interrupt and the handler may very * well be running. Disable the interrupts to make sure no other * interrupt can be fired.
 */
iwl_disable_interrupts(trans);
/* Make sure it finished running */
iwl_pcie_synchronize_irqs(trans);
mutex_lock(&trans_pcie->mutex);
/* If platform's RF_KILL switch is NOT set to KILL */
hw_rfkill = iwl_pcie_check_hw_rf_kill(trans); if (hw_rfkill && !run_in_rfkill) {
ret = -ERFKILL; goto out;
}
/* Someone called stop_device, don't try to start_fw */ if (trans_pcie->is_down) {
IWL_WARN(trans, "Can't start_fw since the HW hasn't been started\n");
ret = -EIO; goto out;
}
/* make sure rfkill handshake bits are cleared */
iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
ret = iwl_pcie_nic_init(trans); if (ret) {
IWL_ERR(trans, "Unable to init nic\n"); goto out;
}
/* * Now, we load the firmware and don't want to be interrupted, even * by the RF-Kill interrupt (hence mask all the interrupt besides the * FH_TX interrupt which is needed to load the firmware). If the * RF-Kill switch is toggled, we will find out after having loaded * the firmware and return the proper value to the caller.
 */
iwl_enable_fw_load_int(trans);
/* really make sure rfkill handshake bits are cleared */
iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
/* Load the given image to the HW */ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
ret = iwl_pcie_load_given_ucode_8000(trans, img); else
ret = iwl_pcie_load_given_ucode(trans, img);
/* re-check RF-Kill state since we may have missed the interrupt */
hw_rfkill = iwl_pcie_check_hw_rf_kill(trans); if (hw_rfkill && !run_in_rfkill)
ret = -ERFKILL;
/* * Check again since the RF kill state may have changed while * all the interrupts were disabled, in this case we couldn't * receive the RF kill interrupt and update the state in the * op_mode. * Don't call the op_mode if the rkfill state hasn't changed. * This allows the op_mode to call stop_device from the rfkill * notification without endless recursion. Under very rare * circumstances, we might have a small recursion if the rfkill * state changed exactly now while we were called from stop_device. * This is very unlikely but can happen and is supported.
 */
hw_rfkill = iwl_is_rfkill_set(trans); if (hw_rfkill) {
set_bit(STATUS_RFKILL_HW, &trans->status);
set_bit(STATUS_RFKILL_OPMODE, &trans->status);
} else {
clear_bit(STATUS_RFKILL_HW, &trans->status);
clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
} if (hw_rfkill != was_in_rfkill)
iwl_trans_pcie_rf_kill(trans, hw_rfkill, false);
}
/* NOTE(review): splice point -- start of the D3 suspend/resume fragment */
if (reset) { /* * reset TX queues -- some of their registers reset during S3 * so if we don't reset everything here the D3 image would try * to execute some invalid memory upon resume
 */
iwl_trans_pcie_tx_reset(trans);
}
ret = iwl_finish_nic_init(trans); if (ret) return ret;
/* * Reconfigure IVAR table in case of MSIX or reset ict table in * MSI mode since HW reset erased it. * Also enables interrupts - none will happen as * the device doesn't know we're waking it up, only when * the opmode actually tells it after this call.
 */
iwl_pcie_conf_msix_hw(trans_pcie); if (!trans_pcie->msix_enabled)
iwl_pcie_reset_ict(trans);
iwl_enable_interrupts(trans);
iwl_pcie_set_pwr(trans, false);
if (!reset) {
iwl_clear_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
} else {
iwl_trans_pcie_tx_reset(trans);
ret = iwl_pcie_rx_init(trans); if (ret) {
IWL_ERR(trans, "Failed to resume the device (RX reset)\n"); return ret;
}
}
IWL_DEBUG_POWER(trans, "WFPM value upon resume = 0x%08X\n",
iwl_read_umac_prph(trans, WFPM_GP2));
val = iwl_read32(trans, CSR_RESET); if (val & CSR_RESET_REG_FLAG_NEVO_RESET)
*status = IWL_D3_STATUS_RESET; else
*status = IWL_D3_STATUS_ALIVE;
out: if (*status == IWL_D3_STATUS_ALIVE)
ret = iwl_pcie_d3_handshake(trans, false); else
trans->state = IWL_TRANS_NO_FW;
/* NOTE(review): splice point -- tail of interrupt-capability setup */
IWL_DEBUG_INFO(trans, "MSI-X enabled. %d interrupt vectors were allocated\n",
num_irqs);
/* * In case the OS provides fewer interrupts than requested, different * causes will share the same interrupt vector as follows: * One interrupt less: non rx causes shared with FBQ. * Two interrupts less: non rx causes shared with FBQ and RSS. * More than two interrupts: we will use fewer RSS queues.
 */ if (num_irqs <= max_irqs - 2) {
info->num_rxqs = num_irqs + 1;
trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX |
IWL_SHARED_IRQ_FIRST_RSS;
} elseif (num_irqs == max_irqs - 1) {
info->num_rxqs = num_irqs;
trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX;
} else {
info->num_rxqs = num_irqs - 1;
}
/*
 * iwl_pcie_irq_set_affinity - spread RX-queue MSI-X vectors across CPUs
 *
 * Sets an affinity hint for each RX-queue interrupt vector on successive
 * online CPUs (skipping vector 0 when it is not shared with RSS).
 * Compiled only on SMP kernels.
 *
 * Fix: fused tokens "staticvoid" and "#ifdefined" (extraction damage)
 * broke compilation; formatting restored to kernel style.
 */
static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans,
				      struct iwl_trans_info *info)
{
#if defined(CONFIG_SMP)
	int iter_rx_q, i, ret, cpu, offset;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* start at vector 1 unless the first vector also serves RSS */
	i = trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 0 : 1;
	iter_rx_q = info->num_rxqs - 1 + i;
	offset = 1 + i;
	for (; i < iter_rx_q ; i++) {
		/*
		 * Get the cpu prior to the place to search
		 * (i.e. return will be > i - 1).
		 */
		cpu = cpumask_next(i - offset, cpu_online_mask);
		cpumask_set_cpu(cpu, &trans_pcie->affinity_mask[i]);
		ret = irq_set_affinity_hint(trans_pcie->msix_entries[i].vector,
					    &trans_pcie->affinity_mask[i]);
		if (ret)
			IWL_ERR(trans_pcie->trans,
				"Failed to set affinity mask for IRQ %d\n",
				trans_pcie->msix_entries[i].vector);
	}
#endif
}
/*
 * Query the product-reset mode via the internal PLDR ACPI _DSM and log it.
 *
 * NOTE(review): this block is spliced -- from "if (!integrated)" down, the
 * code uses undeclared 'integrated', 'mode' and 'enable' and issues a
 * SET_MODE _DSM call; that belongs to the mode-setting routine, and this
 * function is never closed (the final '}' only closes the inner if).  The
 * 'res' object from the GET_MODE call is also never ACPI_FREE'd in the
 * visible code -- presumably lost with the missing tail.  Reconstruct
 * against the upstream source; code left byte-identical.
 */
void iwl_trans_pcie_check_product_reset_mode(struct pci_dev *pdev)
{ union acpi_object *res;
res = iwl_trans_pcie_call_prod_reset_dsm(pdev,
DSM_INTERNAL_PLDR_CMD_GET_MODE,
0); if (IS_ERR(res)) return;
if (res->type != ACPI_TYPE_INTEGER)
IWL_ERR_DEV(&pdev->dev, "unexpected return type from product reset DSM\n"); else
IWL_DEBUG_DEV_POWER(&pdev->dev, "product reset mode is 0x%llx\n",
res->integer.value);
/* NOTE(review): splice point -- 'integrated'/'mode'/'enable' undeclared */
if (!integrated)
mode |= DSM_INTERNAL_PLDR_MODE_EN_WIFI_FLR |
DSM_INTERNAL_PLDR_MODE_EN_BT_OFF_ON;
res = iwl_trans_pcie_call_prod_reset_dsm(pdev,
DSM_INTERNAL_PLDR_CMD_SET_MODE,
mode); if (IS_ERR(res)) { if (enable)
IWL_ERR_DEV(&pdev->dev, "ACPI _DSM not available (%d), cannot do product reset\n",
(int)PTR_ERR(res)); return;
}
/*
 * iwl_trans_pcie_check_product_reset_status - log the product-reset status
 *
 * Issues the internal PLDR GET_STATUS _DSM; on success logs the returned
 * integer, otherwise complains about the unexpected return type.  The
 * ACPI object is released in all non-error paths.
 */
void iwl_trans_pcie_check_product_reset_status(struct pci_dev *pdev)
{
	union acpi_object *res;

	res = iwl_trans_pcie_call_prod_reset_dsm(pdev,
						 DSM_INTERNAL_PLDR_CMD_GET_STATUS,
						 0);
	if (IS_ERR(res))
		return;

	if (res->type == ACPI_TYPE_INTEGER)
		IWL_DEBUG_DEV_POWER(&pdev->dev,
				    "product reset status is 0x%llx\n",
				    res->integer.value);
	else
		IWL_ERR_DEV(&pdev->dev,
			    "unexpected return type from product reset DSM\n");

	ACPI_FREE(res);
}
/*
 * iwl_trans_pcie_call_reset - reset the device via ACPI _PRR/_RST,
 * falling back to a PCI function-level reset
 *
 * Evaluates _PRR to obtain a power-resource reference and calls _RST on
 * it.  If ACPI is unavailable or any step fails, falls back to
 * pci_reset_function().
 *
 * Fixes: fused token "staticvoid" (extraction damage) broke compilation;
 * also the _PRR result buffer was dereferenced without checking that it
 * is a non-NULL package with exactly one element, which could crash on a
 * malformed BIOS table.
 */
static void iwl_trans_pcie_call_reset(struct pci_dev *pdev)
{
#ifdef CONFIG_ACPI
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *p, *ref;
	acpi_status status;
	int ret = -EINVAL;

	status = acpi_evaluate_object(ACPI_HANDLE(&pdev->dev),
				      "_PRR", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		IWL_DEBUG_DEV_POWER(&pdev->dev, "No _PRR method found\n");
		goto out;
	}

	p = buffer.pointer;
	/* _PRR must return a package holding a single power-resource ref */
	if (!p || p->type != ACPI_TYPE_PACKAGE || p->package.count != 1) {
		pci_err(pdev, "Bad _PRR return type\n");
		goto out;
	}

	ref = &p->package.elements[0];
	if (ref->type != ACPI_TYPE_LOCAL_REFERENCE) {
		pci_err(pdev, "_PRR wasn't a reference\n");
		goto out;
	}

	status = acpi_evaluate_object(ref->reference.handle,
				      "_RST", NULL, NULL);
	if (ACPI_FAILURE(status)) {
		pci_err(pdev,
			"Failed to call _RST on object returned by _PRR (%d)\n",
			status);
		goto out;
	}

	ret = 0;
out:
	kfree(buffer.pointer);
	if (!ret) {
		IWL_DEBUG_DEV_POWER(&pdev->dev,
				    "called _RST on _PRR object\n");
		return;
	}

	IWL_DEBUG_DEV_POWER(&pdev->dev,
			    "No BIOS support, using pci_reset_function()\n");
#endif
	pci_reset_function(pdev);
}
/*
 * NOTE(review): the span below splices two function fragments with their
 * headers lost in extraction:
 *   1) the interior of a reset-scheduling routine (mode validation,
 *      ME-present downgrade to function reset, module refcount, removal
 *      work allocation -- 'msg' and 'removal' are undeclared here);
 *   2) the interior of the nic-access-grab path (wake bit, sleep/poll for
 *      MAC access -- 'write', 'poll', 'mask', 'silent' undeclared, and
 *      the fused token "returnfalse" will not compile).
 * Reconstruct against the upstream source.  Code left byte-identical.
 */
if (WARN_ON(mode < IWL_RESET_MODE_REMOVE_ONLY ||
mode == IWL_RESET_MODE_BACKOFF)) return;
if (test_bit(STATUS_TRANS_DEAD, &trans->status)) return;
if (trans_pcie->me_present && mode == IWL_RESET_MODE_PROD_RESET) {
mode = IWL_RESET_MODE_FUNC_RESET; if (trans_pcie->me_present < 0)
msg = " instead of product reset as ME may be present"; else
msg = " instead of product reset as ME is present";
}
/* * get a module reference to avoid doing this * while unloading anyway and to avoid * scheduling a work with code that's being * removed.
 */ if (!try_module_get(THIS_MODULE)) {
IWL_ERR(trans, "Module is being unloaded - abort\n"); return;
}
removal = kzalloc(sizeof(*removal), GFP_ATOMIC); if (!removal) {
module_put(THIS_MODULE); return;
} /* * we don't need to clear this flag, because * the trans will be freed and reallocated.
 */
set_bit(STATUS_TRANS_DEAD, &trans->status);
/* NOTE(review): splice point -- start of the nic-access-grab fragment */
/* this bit wakes up the NIC */
iwl_trans_set_bit(trans, CSR_GP_CNTRL, write); if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
udelay(2);
/* * These bits say the device is running, and should keep running for * at least a short while (at least as long as MAC_ACCESS_REQ stays 1), * but they do not indicate that embedded SRAM is restored yet; * HW with volatile SRAM must save/restore contents to/from * host DRAM when sleeping/waking for power-saving. * Each direction takes approximately 1/4 millisecond; with this * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a * series of register accesses are expected (e.g. reading Event Log), * to keep device from sleeping. * * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that * SRAM is okay/restored. We don't check that here because this call * is just for hardware register access; but GP1 MAC_SLEEP * check is a good idea before accessing the SRAM of HW with * volatile SRAM (e.g. reading Event Log). * * 5000 series and later (including 1000 series) have non-volatile SRAM, * and do not save/restore SRAM when power cycling.
 */
ret = iwl_poll_bits_mask(trans, CSR_GP_CNTRL, poll, mask, 15000); if (unlikely(ret)) {
u32 cntrl = iwl_read32(trans, CSR_GP_CNTRL);
if (silent) {
spin_unlock(&trans_pcie->reg_lock); returnfalse;
}
WARN_ONCE(1, "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n",
cntrl);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.