if (!bcm_vk_drv_access_ok(vk)) {
dev_err(&vk->pdev->dev, "Interrupt %d received when msgq not inited\n", irq); goto skip_schedule_work;
}
/* if notification is not pending, set bit and schedule work */ if (test_and_set_bit(BCM_VK_WQ_NOTF_PEND, vk->wq_offload) == 0)
queue_work(vk->wq_thread, &vk->wq_work);
skip_schedule_work: return IRQ_HANDLED;
}
staticint bcm_vk_intf_ver_chk(struct bcm_vk *vk)
{ struct device *dev = &vk->pdev->dev;
u32 reg;
u16 major, minor; int ret = 0;
/* check and make copy of alert with lock and then free lock */
spin_lock_irqsave(&vk->host_alert_lock, flags); if (intf_down)
vk->host_alert.notfs |= ERR_LOG_HOST_PCIE_DWN;
/* call display with copy */
bcm_vk_log_notf(vk, &alert, bcm_vk_host_err,
ARRAY_SIZE(bcm_vk_host_err));
/* * If it is a sys fault or heartbeat timeout, we would like extract * log msg from the card so that we would know what is the last fault
*/ if (!intf_down &&
((vk->host_alert.flags & ERR_LOG_HOST_HB_FAIL) ||
(vk->peer_alert.flags & ERR_LOG_SYS_FAULT)))
bcm_vk_dump_peer_log(vk);
}
/* first read the offset from spare register */
offset = vkread32(vk, BAR_0, BAR_CARD_STATIC_INFO);
offset &= (pci_resource_len(vk->pdev, BAR_2 * 2) - 1);
/* based on the offset, read info to internal card info structure */
dst = (u8 *)info; for (i = 0; i < sizeof(*info); i++)
*dst++ = vkread8(vk, BAR_2, offset++);
/* * get the peer log pointer, only need the offset, and get record * of the log buffer information which would be used for checking * before dump, in case the BAR2 memory has been corrupted.
*/
vk->peerlog_off = offset;
memcpy_fromio(&vk->peerlog_info, vk->bar[BAR_2] + vk->peerlog_off, sizeof(vk->peerlog_info));
/* * Do a range checking and if out of bound, the record will be zeroed * which guarantees that nothing would be dumped. In other words, * peer dump is disabled.
*/ if ((vk->peerlog_info.buf_size > BCM_VK_PEER_LOG_BUF_MAX) ||
(vk->peerlog_info.mask != (vk->peerlog_info.buf_size - 1)) ||
(vk->peerlog_info.rd_idx > vk->peerlog_info.mask) ||
(vk->peerlog_info.wr_idx > vk->peerlog_info.mask)) {
dev_err(dev, "Peer log disabled - range error: Size 0x%x(0x%x), [Rd Wr] = [%d %d]\n",
vk->peerlog_info.buf_size,
vk->peerlog_info.mask,
vk->peerlog_info.rd_idx,
vk->peerlog_info.wr_idx);
memset(&vk->peerlog_info, 0, sizeof(vk->peerlog_info));
} else {
dev_dbg(dev, "Peer log: Size 0x%x(0x%x), [Rd Wr] = [%d %d]\n",
vk->peerlog_info.buf_size,
vk->peerlog_info.mask,
vk->peerlog_info.rd_idx,
vk->peerlog_info.wr_idx);
}
}
/* calculate offset which is based on peerlog offset */
buf_size = vkread32(vk, BAR_2,
vk->peerlog_off
+ offsetof(struct bcm_vk_peer_log, buf_size));
offset = vk->peerlog_off + sizeof(struct bcm_vk_peer_log)
+ buf_size;
/* first read the num and entry size */
num = vkread32(vk, BAR_2, offset);
entry_size = vkread32(vk, BAR_2, offset + sizeof(num));
/* check for max allowed */ if (num > BCM_VK_PROC_MON_MAX) {
dev_err(dev, "Processing monitoring entry %d exceeds max %d\n",
num, BCM_VK_PROC_MON_MAX); return;
}
mon->num = num;
mon->entry_size = entry_size;
vk->proc_mon_off = offset;
/* read it once that will capture those static info */
dst = (u8 *)&mon->entries[0];
offset += sizeof(num) + sizeof(entry_size);
memcpy_fromio(dst, vk->bar[BAR_2] + offset, num * entry_size);
}
/* check for marker, but allow diags mode to skip sync */ if (!bcm_vk_msgq_marker_valid(vk)) return (rdy_marker == VK_BAR1_DIAG_RDY_MARKER ? 0 : -EINVAL);
/* * Write down scratch addr which is used for DMA. For * signed part, BAR1 is accessible only after boot2 has come * up
*/ if (vk->tdma_addr) {
vkwrite32(vk, (u64)vk->tdma_addr >> 32, BAR_1,
VK_BAR1_SCRATCH_OFF_HI);
vkwrite32(vk, (u32)vk->tdma_addr, BAR_1,
VK_BAR1_SCRATCH_OFF_LO);
vkwrite32(vk, nr_scratch_pages * PAGE_SIZE, BAR_1,
VK_BAR1_SCRATCH_SZ_ADDR);
}
/* get static card info, only need to read once */
bcm_vk_get_card_info(vk);
/* get the proc mon info once */
bcm_vk_get_proc_mon_info(vk);
return 0;
}
/*
 * bcm_vk_blk_drv_access() - block further driver access and kill user apps.
 * @vk: device context
 *
 * Marks the message queue as uninitialized so all subsequent reads/writes
 * are blocked, then sends SIGKILL to every process holding an open context
 * except the one performing a reset (outside reset, reset_pid is 0, so all
 * contexts are killed).  Any tty users are terminated as well.  The whole
 * sequence runs under ctx_lock.
 */
void bcm_vk_blk_drv_access(struct bcm_vk *vk)
{
	int bucket;

	spin_lock(&vk->ctx_lock);

	/* set msgq_inited to 0 so that all rd/wr will be blocked */
	atomic_set(&vk->msgq_inited, 0);

	for (bucket = 0; bucket < VK_PID_HT_SZ; bucket++) {
		struct bcm_vk_ctx *cur;

		list_for_each_entry(cur, &vk->pid_ht[bucket].head, node) {
			/* spare only the process that is doing the reset */
			if (cur->pid == vk->reset_pid)
				continue;

			dev_dbg(&vk->pdev->dev, "Send kill signal to pid %d\n",
				cur->pid);
			kill_pid(find_vpid(cur->pid), SIGKILL, 1);
		}
	}

	bcm_vk_tty_terminate_tty_user(vk);
	spin_unlock(&vk->ctx_lock);
}
if (load_type == VK_IMAGE_TYPE_BOOT1) { /* * After POR, enable VK soft BOOTSRC so bootrom do not clear * the pushed image (the TCM memories).
*/
value = vkread32(vk, BAR_0, BAR_BOOTSRC_SELECT);
value |= BOOTSRC_SOFT_ENABLE;
vkwrite32(vk, value, BAR_0, BAR_BOOTSRC_SELECT);
/* To send more data to VK than max_buf allowed at a time */ do { /* * Check for ack from card. when Ack is received, * it means all the data is received by card. * Exit the loop after ack is received.
*/
ret = bcm_vk_wait(vk, BAR_0, BAR_BOOT_STATUS,
FW_LOADER_ACK_RCVD_ALL_DATA,
FW_LOADER_ACK_RCVD_ALL_DATA,
TXFR_COMPLETE_TIMEOUT_MS); if (ret == 0) {
dev_dbg(dev, "Exit boot2 download\n"); break;
} elseif (ret == -EFAULT) {
dev_err(dev, "Error detected during ACK waiting"); goto err_firmware_out;
}
/* exit the loop, if there is no response from card */ if (time_after(jiffies, timeout)) {
dev_err(dev, "Error. No reply from card\n");
ret = -ETIMEDOUT; goto err_firmware_out;
}
/* Wait for VK to open BAR space to copy new data */
ret = bcm_vk_wait(vk, BAR_0, offset_codepush,
codepush, 0,
TXFR_COMPLETE_TIMEOUT_MS); if (ret == 0) {
offset += max_buf;
ret = request_partial_firmware_into_buf
(&fw,
filename,
dev, bufp,
max_buf,
offset); if (ret) {
dev_err(dev, "Error %d requesting firmware file: %s offset: 0x%zx\n",
ret, filename, offset); goto err_firmware_out;
}
dev_dbg(dev, "size=0x%zx\n", fw->size);
dev_dbg(dev, "Signaling 0x%x to 0x%llx\n",
codepush, offset_codepush);
vkwrite32(vk, codepush, BAR_0, offset_codepush); /* reload timeout after every codepush */
timeout = jiffies +
msecs_to_jiffies(LOAD_IMAGE_TIMEOUT_MS);
} elseif (ret == -EFAULT) {
dev_err(dev, "Error detected waiting for transfer\n"); goto err_firmware_out;
}
} while (1);
/* wait for fw status bits to indicate app ready */
ret = bcm_vk_wait(vk, BAR_0, VK_BAR_FWSTS,
VK_FWSTS_READY,
VK_FWSTS_READY,
BOOT2_STARTUP_TIMEOUT_MS); if (ret < 0) {
dev_err(dev, "Boot2 not ready - ret(%d)\n", ret); goto err_firmware_out;
}
is_stdalone = vkread32(vk, BAR_0, BAR_BOOT_STATUS) &
BOOT_STDALONE_RUNNING; if (!is_stdalone) {
ret = bcm_vk_intf_ver_chk(vk); if (ret) {
dev_err(dev, "failure in intf version check\n"); goto err_firmware_out;
}
/* * Next, initialize Message Q if we are loading boot2. * Do a force sync
*/
ret = bcm_vk_sync_msgq(vk, true); if (ret) {
dev_err(dev, "Boot2 Error reading comm msg Q info\n");
ret = -EIO; goto err_firmware_out;
}
/* sync & channel other info */
ret = bcm_vk_sync_card_info(vk); if (ret) {
dev_err(dev, "Syncing Card Info failure\n"); goto err_firmware_out;
}
}
}
err_firmware_out:
release_firmware(fw);
err_buf_out: if (bufp)
dma_free_coherent(dev, max_buf, bufp, boot_dma_addr);
/* Log status so that we know different stages */
dev_info(&vk->pdev->dev, "boot-status value for next image: 0x%x : fw-status 0x%x\n",
boot_status, fw_status);
switch (pdev->device) { case PCI_DEVICE_ID_VALKYRIE: /* get the chip id to decide sub-class */
rev = MAJOR_SOC_REV(vkread32(vk, BAR_0, BAR_CHIP_ID)); if (rev < ARRAY_SIZE(vk_soc_tab)) {
idx = vk_soc_tab[rev];
} else { /* Default to A0 firmware for all other chip revs */
idx = VALKYRIE_A0;
dev_warn(&pdev->dev, "Rev %d not in image lookup table, default to idx=%d\n",
rev, idx);
} break;
case PCI_DEVICE_ID_VIPER:
idx = VIPER; break;
default:
dev_err(&pdev->dev, "no images for 0x%x\n", pdev->device);
} return idx;
}
for (i = 0; i < IMG_PER_TYPE_MAX; i++) {
fw = NULL;
ret = request_partial_firmware_into_buf(&fw,
entry->image_name[i],
dev, &dummy, sizeof(dummy),
0);
release_firmware(fw); if (!ret) return entry->image_name[i];
} return NULL;
}
int bcm_vk_auto_load_all_images(struct bcm_vk *vk)
{ int i, ret = -1; enum soc_idx idx; struct device *dev = &vk->pdev->dev;
u32 curr_type; constchar *curr_name;
idx = get_soc_idx(vk); if (idx == VK_IDX_INVALID) goto auto_load_all_exit;
/* log a message to know the relative loading order */
dev_dbg(dev, "Load All for device %d\n", vk->devid);
for (i = 0; i < NUM_BOOT_STAGES; i++) {
curr_type = image_tab[idx][i].image_type; if (bcm_vk_next_boot_image(vk) == curr_type) {
curr_name = get_load_fw_name(vk, &image_tab[idx][i]); if (!curr_name) {
dev_err(dev, "No suitable firmware exists for type %d",
curr_type);
ret = -ENOENT; goto auto_load_all_exit;
}
ret = bcm_vk_load_image_by_type(vk, curr_type,
curr_name);
dev_info(dev, "Auto load %s, ret %d\n",
curr_name, ret);
/*
 * Deferred work queue handler for draining and auto download.
 *
 * Services the wq_offload bitmap: dispatches a pending peer notification,
 * runs the auto-load sequence if requested, and finally drains the
 * card-to-host message queue.  A negative drain result is treated as a
 * fatal interface error and blocks further driver access.
 *
 * Fixes: "staticvoid" and "elseif" were glued tokens (missing whitespace),
 * which do not compile.
 */
static void bcm_vk_wq_handler(struct work_struct *work)
{
	struct bcm_vk *vk = container_of(work, struct bcm_vk, wq_work);
	struct device *dev = &vk->pdev->dev;
	s32 ret;

	/* check wq offload bit map to perform various operations */
	if (test_bit(BCM_VK_WQ_NOTF_PEND, vk->wq_offload)) {
		/* clear bit right the way for notification */
		clear_bit(BCM_VK_WQ_NOTF_PEND, vk->wq_offload);
		bcm_vk_handle_notf(vk);
	}
	if (test_bit(BCM_VK_WQ_DWNLD_AUTO, vk->wq_offload)) {
		bcm_vk_auto_load_all_images(vk);

		/*
		 * at the end of operation, clear AUTO bit and pending
		 * bit
		 */
		clear_bit(BCM_VK_WQ_DWNLD_AUTO, vk->wq_offload);
		clear_bit(BCM_VK_WQ_DWNLD_PEND, vk->wq_offload);
	}

	/* next, try to drain */
	ret = bcm_to_h_msg_dequeue(vk);

	if (ret == 0)
		dev_dbg(dev, "Spurious trigger for workqueue\n");
	else if (ret < 0)
		/* negative return: interface is bad - block further access */
		bcm_vk_blk_drv_access(vk);
}
staticlong bcm_vk_load_image(struct bcm_vk *vk, conststruct vk_image __user *arg)
{ struct device *dev = &vk->pdev->dev; constchar *image_name; struct vk_image image;
u32 next_loadable; enum soc_idx idx; int image_idx; int ret = -EPERM;
if (copy_from_user(&image, arg, sizeof(image))) return -EACCES;
/* * if something is pending download already. This could only happen * for now when the driver is being loaded, or if someone has issued * another download command in another shell.
*/ if (test_and_set_bit(BCM_VK_WQ_DWNLD_PEND, vk->wq_offload) != 0) {
dev_err(dev, "Download operation already pending.\n"); return ret;
}
image_name = image.filename; if (image_name[0] == '\0') { /* Use default image name if NULL */
idx = get_soc_idx(vk); if (idx == VK_IDX_INVALID) goto err_idx;
/* Image idx starts with boot1 */
image_idx = image.type - VK_IMAGE_TYPE_BOOT1;
image_name = get_load_fw_name(vk, &image_tab[idx][image_idx]); if (!image_name) {
dev_err(dev, "No suitable image found for type %d",
image.type);
ret = -ENOENT; goto err_idx;
}
} else { /* Ensure filename is NULL terminated */
image.filename[sizeof(image.filename) - 1] = '\0';
}
ret = bcm_vk_load_image_by_type(vk, image.type, image_name);
dev_info(dev, "Load %s, ret %d\n", image_name, ret);
err_idx:
clear_bit(BCM_VK_WQ_DWNLD_PEND, vk->wq_offload);
return ret;
}
/*
 * bcm_vk_reset_successful() - evaluate FW status after a reset request.
 * @vk: device context
 *
 * Reads VK_BAR_FWSTS and returns 0 when the reported reset reason is
 * mailbox-doorbell or unknown; returns -EAGAIN when deinit was triggered
 * but the done bit is not set, or when the reason cannot be confirmed.
 *
 * NOTE(review): this block appears to be a corrupted merge of two
 * routines.  Everything from bcm_vk_drain_msg_on_reset() onward (BAR1
 * scrubbing, CODEPUSH handling, doorbell presses, returning doorbell
 * values) references locals that are never declared in this function
 * (i, boot_status, value, is_stdalone, is_boot2) and looks like it
 * belongs to a separate reset-trigger function.  The tokens "staticint"
 * and "elseif" are also missing whitespace.  Code left byte-identical
 * pending reconstruction against the upstream driver - TODO confirm.
 */
staticint bcm_vk_reset_successful(struct bcm_vk *vk)
{ struct device *dev = &vk->pdev->dev;
u32 fw_status, reset_reason; int ret = -EAGAIN;
/* * Reset could be triggered when the card in several state: * i) in bootROM * ii) after boot1 * iii) boot2 running * * i) & ii) - no status bits will be updated. If vkboot1 * runs automatically after reset, it will update the reason * to be unknown reason * iii) - reboot reason match + deinit done.
 */
fw_status = vkread32(vk, BAR_0, VK_BAR_FWSTS); /* immediate exit if interface goes down */ if (BCM_VK_INTF_IS_DOWN(fw_status)) {
dev_err(dev, "PCIe Intf Down!\n"); goto reset_exit;
}
reset_reason = (fw_status & VK_FWSTS_RESET_REASON_MASK); if ((reset_reason == VK_FWSTS_RESET_MBOX_DB) ||
(reset_reason == VK_FWSTS_RESET_UNKNOWN))
ret = 0;
/* * if some of the deinit bits are set, but done * bit is not, this is a failure if triggered while boot2 is running
 */ if ((fw_status & VK_FWSTS_DEINIT_TRIGGERED) &&
!(fw_status & VK_FWSTS_RESET_DONE))
ret = -EAGAIN;
reset_exit:
dev_dbg(dev, "FW status = 0x%x ret %d\n", fw_status, ret);
/* NOTE(review): code below uses undeclared locals - see header note */
/* clean up before pressing the door bell */
bcm_vk_drain_msg_on_reset(vk);
vkwrite32(vk, 0, BAR_1, VK_BAR1_MSGQ_DEF_RDY); /* make tag '\0' terminated */
vkwrite32(vk, 0, BAR_1, VK_BAR1_BOOT1_VER_TAG);
/* scrub the dauth store/valid and SOTP revid windows in BAR1 */
for (i = 0; i < VK_BAR1_DAUTH_MAX; i++) {
vkwrite32(vk, 0, BAR_1, VK_BAR1_DAUTH_STORE_ADDR(i));
vkwrite32(vk, 0, BAR_1, VK_BAR1_DAUTH_VALID_ADDR(i));
} for (i = 0; i < VK_BAR1_SOTP_REVID_MAX; i++)
vkwrite32(vk, 0, BAR_1, VK_BAR1_SOTP_REVID_ADDR(i));
/* * When boot request fails, the CODE_PUSH_OFFSET stays persistent. * Allowing us to debug the failure. When we call reset, * we should clear CODE_PUSH_OFFSET so ROM does not execute * boot again (and fails again) and instead waits for a new * codepush. And, if previous boot has encountered error, need * to clear the entry values
 */
boot_status = vkread32(vk, BAR_0, BAR_BOOT_STATUS); if (boot_status & BOOT_ERR_MASK) {
dev_info(&vk->pdev->dev, "Card in boot error 0x%x, clear CODEPUSH val\n",
boot_status);
value = 0;
} else {
/* keep CODEPUSH (masked) so ROM can continue from last push */
value = vkread32(vk, BAR_0, BAR_CODEPUSH_SBL);
value &= CODEPUSH_MASK;
}
vkwrite32(vk, value, BAR_0, BAR_CODEPUSH_SBL);
/* special reset handling */
is_stdalone = boot_status & BOOT_STDALONE_RUNNING;
is_boot2 = (boot_status & BOOT_STATE_MASK) == BOOT2_RUNNING; if (vk->peer_alert.flags & ERR_LOG_RAMDUMP) { /* * if card is in ramdump mode, it is hitting an error. Don't * reset the reboot reason as it will contain valid info that * is important - simply use special reset
 */
vkwrite32(vk, VK_BAR0_RESET_RAMPDUMP, BAR_0, VK_BAR_FWSTS); return VK_BAR0_RESET_RAMPDUMP;
} elseif (is_stdalone && !is_boot2) {
dev_info(&vk->pdev->dev, "Hard reset on Standalone mode");
bcm_to_v_reset_doorbell(vk, VK_BAR0_RESET_DB_HARD); return VK_BAR0_RESET_DB_HARD;
}
/* reset fw_status with proper reason, and press db */
vkwrite32(vk, VK_FWSTS_RESET_MBOX_DB, BAR_0, VK_BAR_FWSTS);
bcm_to_v_reset_doorbell(vk, VK_BAR0_RESET_DB_SOFT);
/* clear other necessary registers and alert records */ for (i = 0; i < ARRAY_SIZE(bar0_reg_clr_list); i++)
vkwrite32(vk, 0, BAR_0, bar0_reg_clr_list[i]);
memset(&vk->host_alert, 0, sizeof(vk->host_alert));
memset(&vk->peer_alert, 0, sizeof(vk->peer_alert)); /* clear 4096 bits of bitmap */
bitmap_clear(vk->bmap, 0, VK_MSG_ID_BITMAP_SIZE);
return 0;
}
staticlong bcm_vk_reset(struct bcm_vk *vk, struct vk_reset __user *arg)
{ struct device *dev = &vk->pdev->dev; struct vk_reset reset; int ret = 0;
u32 ramdump_reset; int special_reset;
if (copy_from_user(&reset, arg, sizeof(struct vk_reset))) return -EFAULT;
/* check if any download is in-progress, if so return error */ if (test_and_set_bit(BCM_VK_WQ_DWNLD_PEND, vk->wq_offload) != 0) {
dev_err(dev, "Download operation pending - skip reset.\n"); return -EPERM;
}
/* * The following is the sequence of reset: * - send card level graceful shut down * - wait enough time for VK to handle its business, stopping DMA etc * - kill host apps * - Trigger interrupt with DB
*/
bcm_vk_send_shutdown_msg(vk, VK_SHUTDOWN_GRACEFUL, 0, 0);
spin_lock(&vk->ctx_lock); if (!vk->reset_pid) {
vk->reset_pid = task_pid_nr(current);
} else {
dev_err(dev, "Reset already launched by process pid %d\n",
vk->reset_pid);
ret = -EACCES;
}
spin_unlock(&vk->ctx_lock); if (ret) goto err_exit;
/* * Wait enough time for card os to deinit * and populate the reset reason.
*/
msleep(BCM_VK_DEINIT_TIME_MS);
if (special_reset) { /* if it is special ramdump reset, return the type to user */
reset.arg2 = special_reset; if (copy_to_user(arg, &reset, sizeof(reset)))
ret = -EFAULT;
} else {
ret = bcm_vk_reset_successful(vk);
}
/* make sure DMA is good */
err = dma_set_mask_and_coherent(&pdev->dev,
DMA_BIT_MASK(BCM_VK_DMA_BITS)); if (err) {
dev_err(dev, "failed to set DMA mask\n"); goto err_disable_pdev;
}
/* The tdma is a scratch area for some DMA testings. */ if (nr_scratch_pages) {
vk->tdma_vaddr = dma_alloc_coherent
(dev,
nr_scratch_pages * PAGE_SIZE,
&vk->tdma_addr, GFP_KERNEL); if (!vk->tdma_vaddr) {
err = -ENOMEM; goto err_disable_pdev;
}
}
if (irq < VK_MSIX_IRQ_MIN_REQ) {
dev_err(dev, "failed to get min %d MSIX interrupts, irq(%d)\n",
VK_MSIX_IRQ_MIN_REQ, irq);
err = (irq >= 0) ? -EINVAL : irq; goto err_disable_pdev;
}
if (irq != VK_MSIX_IRQ_MAX)
dev_warn(dev, "Number of IRQs %d allocated - requested(%d).\n",
irq, VK_MSIX_IRQ_MAX);
for (i = 0; i < MAX_BAR; i++) { /* multiple by 2 for 64 bit BAR mapping */
vk->bar[i] = pci_ioremap_bar(pdev, i * 2); if (!vk->bar[i]) {
dev_err(dev, "failed to remap BAR%d\n", i);
err = -ENOMEM; goto err_iounmap;
}
}
for (vk->num_irqs = 0;
vk->num_irqs < VK_MSIX_MSGQ_MAX;
vk->num_irqs++) {
err = devm_request_irq(dev, pci_irq_vector(pdev, vk->num_irqs),
bcm_vk_msgq_irqhandler,
IRQF_SHARED, DRV_MODULE_NAME, vk); if (err) {
dev_err(dev, "failed to request msgq IRQ %d for MSIX %d\n",
pdev->irq + vk->num_irqs, vk->num_irqs + 1); goto err_irq;
}
} /* one irq for notification from VK */
err = devm_request_irq(dev, pci_irq_vector(pdev, vk->num_irqs),
bcm_vk_notf_irqhandler,
IRQF_SHARED, DRV_MODULE_NAME, vk); if (err) {
dev_err(dev, "failed to request notf IRQ %d for MSIX %d\n",
pdev->irq + vk->num_irqs, vk->num_irqs + 1); goto err_irq;
}
vk->num_irqs++;
for (i = 0;
(i < VK_MSIX_TTY_MAX) && (vk->num_irqs < irq);
i++, vk->num_irqs++) {
err = devm_request_irq(dev, pci_irq_vector(pdev, vk->num_irqs),
bcm_vk_tty_irqhandler,
IRQF_SHARED, DRV_MODULE_NAME, vk); if (err) {
dev_err(dev, "failed request tty IRQ %d for MSIX %d\n",
pdev->irq + vk->num_irqs, vk->num_irqs + 1); goto err_irq;
}
bcm_vk_tty_set_irq_enabled(vk, i);
}
id = ida_alloc(&bcm_vk_ida, GFP_KERNEL); if (id < 0) {
err = id;
dev_err(dev, "unable to get id\n"); goto err_irq;
}
/* * lets trigger an auto download. We don't want to do it serially here * because at probing time, it is not supposed to block for a long time.
*/
boot_status = vkread32(vk, BAR_0, BAR_BOOT_STATUS); if (auto_load) { if ((boot_status & BOOT_STATE_MASK) == BROM_RUNNING) {
err = bcm_vk_trigger_autoload(vk); if (err) goto err_bcm_vk_tty_exit;
} else {
dev_err(dev, "Auto-load skipped - BROM not in proper state (0x%x)\n",
boot_status);
}
}
/* * Trigger a reset to card and wait enough time for UCODE to rerun, * which re-initialize the card into its default state. * This ensures when driver is re-enumerated it will start from * a completely clean state.
*/
bcm_vk_trigger_reset(vk);
usleep_range(BCM_VK_UCODE_BOOT_US, BCM_VK_UCODE_BOOT_MAX_US);
if (vk->tdma_vaddr)
dma_free_coherent(&pdev->dev, nr_scratch_pages * PAGE_SIZE,
vk->tdma_vaddr, vk->tdma_addr);
/* remove if name is set which means misc dev registered */ if (misc_device->name) {
misc_deregister(misc_device);
kfree(misc_device->name);
ida_free(&bcm_vk_ida, vk->devid);
} for (i = 0; i < vk->num_irqs; i++)
devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), vk);
if (boot_stat == BOOT1_RUNNING) { /* simply trigger a reset interrupt to park it */
bcm_vk_trigger_reset(vk);
} elseif (boot_stat == BROM_NOT_RUN) { int err;
u16 lnksta;
/* * The boot status only reflects boot condition since last reset * As ucode will run only once to configure pcie, if multiple * resets happen, we lost track if ucode has run or not. * Here, read the current link speed and use that to * sync up the bootstatus properly so that on reboot-back-up, * it has the proper state to start with autoload
*/
err = pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnksta); if (!err &&
(lnksta & PCI_EXP_LNKSTA_CLS) != PCI_EXP_LNKSTA_CLS_2_5GB) {
reg |= BROM_STATUS_COMPLETE;
vkwrite32(vk, reg, BAR_0, BAR_BOOT_STATUS);
}
}
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.