/**
 * struct mci_slot_pdata - board-specific per-slot configuration
 * @bus_width: Number of data lines wired up the slot
 * @detect_pin: GPIO pin wired to the card detect switch
 * @wp_pin: GPIO pin wired to the write protect sensor
 * @non_removable: The slot is not removable, only detect once
 *
 * If a given slot is not present on the board, @bus_width should be
 * set to 0. The other fields are ignored in this case.
 *
 * Any pins that aren't available should be set to a negative value.
 *
 * Note that support for multiple slots is experimental -- some cards
 * might get upset if we don't get the clock management exactly right.
 * But in most cases, it should work just fine.
 */
struct mci_slot_pdata {
	unsigned int		bus_width;	/* fused "unsignedint" in the original was a compile error */
	struct gpio_desc	*detect_pin;
	struct gpio_desc	*wp_pin;
	bool			non_removable;
};
/** * struct atmel_mci - MMC controller state shared between all slots * @lock: Spinlock protecting the queue and associated data. * @regs: Pointer to MMIO registers. * @sg: Scatterlist entry currently being processed by PIO or PDC code. * @sg_len: Size of the scatterlist * @pio_offset: Offset into the current scatterlist entry. * @buffer: Buffer used if we don't have the r/w proof capability. We * don't have the time to switch pdc buffers so we have to use only * one buffer for the full transaction. * @buf_size: size of the buffer. * @buf_phys_addr: buffer address needed for pdc. * @cur_slot: The slot which is currently using the controller. * @mrq: The request currently being processed on @cur_slot, * or NULL if the controller is idle. * @cmd: The command currently being sent to the card, or NULL. * @data: The data currently being transferred, or NULL if no data * transfer is in progress. * @data_size: just data->blocks * data->blksz. * @dma: DMA client state. * @data_chan: DMA channel being used for the current data transfer. * @dma_conf: Configuration for the DMA slave * @cmd_status: Snapshot of SR taken upon completion of the current * command. Only valid when EVENT_CMD_COMPLETE is pending. * @data_status: Snapshot of SR taken upon completion of the current * data transfer. Only valid when EVENT_DATA_COMPLETE or * EVENT_DATA_ERROR is pending. * @stop_cmdr: Value to be loaded into CMDR when the stop command is * to be sent. * @bh_work: Work running the request state machine. * @pending_events: Bitmask of events flagged by the interrupt handler * to be processed by the work. * @completed_events: Bitmask of events which the state machine has * processed. * @state: Work state. * @queue: List of slots waiting for access to the controller. * @need_clock_update: Update the clock rate before the next request. * @need_reset: Reset controller before next request. * @timer: Timer to balance the data timeout error flag which cannot rise. 
* @mode_reg: Value of the MR register. * @cfg_reg: Value of the CFG register. * @bus_hz: The rate of @mck in Hz. This forms the basis for MMC bus * rate and timeout calculations. * @mapbase: Physical address of the MMIO registers. * @mck: The peripheral bus clock hooked up to the MMC controller. * @dev: Device associated with the MMC controller. * @pdata: Per-slot configuration data. * @slot: Slots sharing this MMC controller. * @caps: MCI capabilities depending on MCI version. * @prepare_data: function to setup MCI before data transfer which * depends on MCI capabilities. * @submit_data: function to start data transfer which depends on MCI * capabilities. * @stop_transfer: function to stop data transfer which depends on MCI * capabilities. * * Locking * ======= * * @lock is a softirq-safe spinlock protecting @queue as well as * @cur_slot, @mrq and @state. These must always be updated * at the same time while holding @lock. * * @lock also protects mode_reg and need_clock_update since these are * used to synchronize mode register updates with the queue * processing. * * The @mrq field of struct atmel_mci_slot is also protected by @lock, * and must always be written at the same time as the slot is added to * @queue. * * @pending_events and @completed_events are accessed using atomic bit * operations, so they don't need any locking. * * None of the fields touched by the interrupt handler need any * locking. However, ordering is important: Before EVENT_DATA_ERROR or * EVENT_DATA_COMPLETE is set in @pending_events, all data-related * interrupts must be disabled and @data_status updated with a * snapshot of SR. Similarly, before EVENT_CMD_COMPLETE is set, the * CMDRDY interrupt must be disabled and @cmd_status updated with a * snapshot of SR, and before EVENT_XFER_COMPLETE can be set, the * bytes_xfered field of @data must be written. This is ensured by * using barriers.
*/ struct atmel_mci {
spinlock_t lock; void __iomem *regs;
/** * struct atmel_mci_slot - MMC slot state * @mmc: The mmc_host representing this slot. * @host: The MMC controller this slot is using. * @sdc_reg: Value of SDCR to be written before using this slot. * @sdio_irq: SDIO irq mask for this slot. * @mrq: mmc_request currently being processed or waiting to be * processed, or NULL when the slot is idle. * @queue_node: List node for placing this node in the @queue list of * &struct atmel_mci. * @clock: Clock rate configured by set_ios(). Protected by host->lock. * @flags: Random state bits associated with the slot. * @detect_pin: GPIO pin used for card detection, or negative if not * available. * @wp_pin: GPIO pin used for card write protect sending, or negative * if not available. * @detect_timer: Timer used for debouncing @detect_pin interrupts.
*/ struct atmel_mci_slot { struct mmc_host *mmc; struct atmel_mci *host;
buf = kmalloc(ATMCI_REGS_SIZE, GFP_KERNEL); if (!buf) return -ENOMEM;
pm_runtime_get_sync(dev);
/* * Grab a more or less consistent snapshot. Note that we're * not disabling interrupts, so IMR and SR may not be * consistent.
*/
spin_lock_bh(&host->lock);
memcpy_fromio(buf, host->regs, ATMCI_REGS_SIZE);
spin_unlock_bh(&host->lock);
/* * Fix sconfig's burst size according to atmel MCI. We need to convert them as: * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3. * With version 0x600, we need to convert them as: 1 -> 0, 2 -> 1, 4 -> 2, * 8 -> 3, 16 -> 4. * * This can be done by finding most significant bit set.
*/ staticinlineunsignedint atmci_convert_chksize(struct atmel_mci *host, unsignedint maxburst)
{ unsignedint version = atmci_get_version(host); unsignedint offset = 2;
if (host->mrq->cmd->data) {
host->mrq->cmd->data->error = -ETIMEDOUT;
host->data = NULL; /* * With some SDIO modules, sometimes DMA transfer hangs. If * stop_transfer() is not called then the DMA request is not * removed, following ones are queued and never computed.
*/ if (host->state == STATE_DATA_XFER)
host->stop_transfer(host);
} else {
host->mrq->cmd->error = -ETIMEDOUT;
host->cmd = NULL;
}
host->need_reset = 1;
host->state = STATE_END_REQUEST;
smp_wmb();
queue_work(system_bh_wq, &host->bh_work);
}
staticinlineunsignedint atmci_ns_to_clocks(struct atmel_mci *host, unsignedint ns)
{ /* * It is easier here to use us instead of ns for the timeout, * it prevents from overflows during calculation.
*/ unsignedint us = DIV_ROUND_UP(ns, 1000);
/* Maximum clock frequency is host->bus_hz/2 */ return us * (DIV_ROUND_UP(host->bus_hz, 2000000));
}
/* * Return mask with command flags to be enabled for this command.
*/ static u32 atmci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
{ struct mmc_data *data;
u32 cmdr;
cmd->error = -EINPROGRESS;
cmdr = ATMCI_CMDR_CMDNB(cmd->opcode);
if (cmd->flags & MMC_RSP_PRESENT) { if (cmd->flags & MMC_RSP_136)
cmdr |= ATMCI_CMDR_RSPTYP_136BIT; else
cmdr |= ATMCI_CMDR_RSPTYP_48BIT;
}
/* * This should really be MAXLAT_5 for CMD2 and ACMD41, but * it's too difficult to determine whether this is an ACMD or * not. Better make it 64.
*/
cmdr |= ATMCI_CMDR_MAXLAT_64CYC;
if (mmc->ios.bus_mode == MMC_BUSMODE_OPENDRAIN)
cmdr |= ATMCI_CMDR_OPDCMD;
data = cmd->data; if (data) {
cmdr |= ATMCI_CMDR_START_XFER;
if (host->data_size <= buf_size) { if (host->data_size & 0x3) { /* If size is different from modulo 4, transfer bytes */
atmci_writel(host, counter_reg, host->data_size);
atmci_writel(host, ATMCI_MR, host->mode_reg | ATMCI_MR_PDCFBYTE);
} else { /* Else transfer 32-bits words */
atmci_writel(host, counter_reg, host->data_size / 4);
}
host->data_size = 0;
} else { /* We assume the size of a page is 32-bits aligned */
atmci_writel(host, counter_reg, sg_dma_len(host->sg) / 4);
host->data_size -= sg_dma_len(host->sg); if (host->data_size)
host->sg = sg_next(host->sg);
}
}
/* * Configure PDC buffer according to the data size ie configuring one or two * buffers. Don't use this function if you want to configure only the second * buffer. In this case, use atmci_pdc_set_single_buf.
*/ staticvoid atmci_pdc_set_both_buf(struct atmel_mci *host, int dir)
{
atmci_pdc_set_single_buf(host, dir, PDC_FIRST_BUF); if (host->data_size)
atmci_pdc_set_single_buf(host, dir, PDC_SECOND_BUF);
}
/*
 * Unmap sg lists, called when transfer is finished.
 */
static void atmci_pdc_cleanup(struct atmel_mci *host)
{
	struct mmc_data		*data = host->data;
	struct device		*dev = host->dev;

	/* data is NULL if the card was removed mid-transfer; nothing to unmap. */
	if (data)
		dma_unmap_sg(dev, data->sg, data->sg_len, mmc_get_dma_dir(data));
}
/* * Disable PDC transfers. Update pending flags to EVENT_XFER_COMPLETE after * having received ATMCI_TXBUFE or ATMCI_RXBUFF interrupt. Enable ATMCI_NOTBUSY * interrupt needed for both transfer directions.
*/ staticvoid atmci_pdc_complete(struct atmel_mci *host)
{ struct device *dev = host->dev; int transfer_size = host->data->blocks * host->data->blksz; int i;
if ((!host->caps.has_rwproof)
&& (host->data->flags & MMC_DATA_READ)) { if (host->caps.has_bad_data_ordering) for (i = 0; i < transfer_size; i++)
host->buffer[i] = swab32(host->buffer[i]);
sg_copy_from_buffer(host->data->sg, host->data->sg_len,
host->buffer, transfer_size);
}
if (data)
dma_unmap_sg(host->dma.chan->device->dev,
data->sg, data->sg_len,
mmc_get_dma_dir(data));
}
/*
 * This function is called by the DMA driver from bh context.
 */
static void atmci_dma_complete(void *arg)
{
	struct atmel_mci	*host = arg;
	struct mmc_data		*data = host->data;
	struct device		*dev = host->dev;

	dev_vdbg(dev, "DMA complete\n");

	if (host->caps.has_dma_conf_reg)
		/* Disable DMA hardware handshaking on MCI */
		atmci_writel(host, ATMCI_DMA,
			     atmci_readl(host, ATMCI_DMA) & ~ATMCI_DMAEN);

	atmci_dma_cleanup(host);

	/*
	 * If the card was removed, data will be NULL. No point trying
	 * to send the stop command or waiting for NBUSY in this case.
	 */
	if (data) {
		dev_dbg(dev, "(%s) set pending xfer complete\n", __func__);
		atmci_set_pending(host, EVENT_XFER_COMPLETE);
		queue_work(system_bh_wq, &host->bh_work);

		/*
		 * Regardless of what the documentation says, we have
		 * to wait for NOTBUSY even after block read
		 * operations.
		 *
		 * When the DMA transfer is complete, the controller
		 * may still be reading the CRC from the card, i.e.
		 * the data transfer is still in progress and we
		 * haven't seen all the potential error bits yet.
		 *
		 * The interrupt handler will schedule a different
		 * bh work to finish things up when the data transfer
		 * is completely done.
		 *
		 * We may not complete the mmc request here anyway
		 * because the mmc layer may call back and cause us to
		 * violate the "don't submit new operations from the
		 * completion callback" rule of the dma engine
		 * framework.
		 */
		atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
	}
}
/* * Returns a mask of interrupt flags to be enabled after the whole * request has been prepared.
*/ static u32 atmci_prepare_data(struct atmel_mci *host, struct mmc_data *data)
{
u32 iflags;
/* * Errata: MMC data write operation with less than 12 * bytes is impossible. * * Errata: MCI Transmit Data Register (TDR) FIFO * corruption when length is not multiple of 4.
*/ if (data->blocks * data->blksz < 12
|| (data->blocks * data->blksz) & 3)
host->need_reset = true;
/* * Set interrupt flags and set block length into the MCI mode register even * if this value is also accessible in the MCI block register. It seems to be * necessary before the High Speed MCI version. It also map sg and configure * PDC registers.
*/ static u32
atmci_prepare_data_pdc(struct atmel_mci *host, struct mmc_data *data)
{ struct device *dev = host->dev;
u32 iflags, tmp; int i;
if ((!host->caps.has_rwproof)
&& (host->data->flags & MMC_DATA_WRITE)) {
sg_copy_to_buffer(host->data->sg, host->data->sg_len,
host->buffer, host->data_size); if (host->caps.has_bad_data_ordering) for (i = 0; i < host->data_size; i++)
host->buffer[i] = swab32(host->buffer[i]);
}
/* * We don't do DMA on "complex" transfers, i.e. with * non-word-aligned buffers or lengths. Also, we don't bother * with all the DMA setup overhead for short transfers.
*/ if (data->blocks * data->blksz < ATMCI_DMA_THRESHOLD) return atmci_prepare_data(host, data); if (data->blksz & 3) return atmci_prepare_data(host, data);
if (chan) {
dmaengine_terminate_all(chan);
atmci_dma_cleanup(host);
} else { /* Data transfer was stopped by the interrupt handler */
dev_dbg(dev, "(%s) set pending xfer complete\n", __func__);
atmci_set_pending(host, EVENT_XFER_COMPLETE);
atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
}
}
/* * Start a request: prepare data if needed, prepare the command and activate * interrupts.
*/ staticvoid atmci_start_request(struct atmel_mci *host, struct atmel_mci_slot *slot)
{ struct device *dev = host->dev; struct mmc_request *mrq; struct mmc_command *cmd; struct mmc_data *data;
u32 iflags;
u32 cmdflags;
/* * DMA transfer should be started before sending the command to avoid * unexpected errors especially for read operations in SDIO mode. * Unfortunately, in PDC mode, command has to be sent before starting * the transfer.
*/ if (host->submit_data != &atmci_submit_data_dma)
atmci_send_command(host, cmd, cmdflags);
if (data)
host->submit_data(host, data);
if (host->submit_data == &atmci_submit_data_dma)
atmci_send_command(host, cmd, cmdflags);
/* * We could have enabled interrupts earlier, but I suspect * that would open up a nice can of interesting race * conditions (e.g. command and data complete, but stop not * prepared yet.)
*/
atmci_writel(host, ATMCI_IER, iflags);
}
/* * We may "know" the card is gone even though there's still an * electrical connection. If so, we really need to communicate * this to the MMC core since there won't be any more * interrupts as the card is completely removed. Otherwise, * the MMC core might believe the card is still there even * though the card was just removed very slowly.
*/ if (!test_bit(ATMCI_CARD_PRESENT, &slot->flags)) {
mrq->cmd->error = -ENOMEDIUM;
mmc_request_done(mmc, mrq); return;
}
/* We don't support multiple blocks of weird lengths. */
data = mrq->data; if (data && data->blocks > 1 && data->blksz & 3) {
mrq->cmd->error = -EINVAL;
mmc_request_done(mmc, mrq);
}
slot->sdc_reg &= ~ATMCI_SDCBUS_MASK; switch (ios->bus_width) { case MMC_BUS_WIDTH_1:
slot->sdc_reg |= ATMCI_SDCBUS_1BIT; break; case MMC_BUS_WIDTH_4:
slot->sdc_reg |= ATMCI_SDCBUS_4BIT; break; case MMC_BUS_WIDTH_8:
slot->sdc_reg |= ATMCI_SDCBUS_8BIT; break;
}
if (ios->clock) { unsignedint clock_min = ~0U; int clkdiv;
spin_lock_bh(&host->lock); if (!host->mode_reg) {
atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN); if (host->caps.has_cfg_reg)
atmci_writel(host, ATMCI_CFG, host->cfg_reg);
}
/* * Use mirror of ios->clock to prevent race with mmc * core ios update when finding the minimum.
*/
slot->clock = ios->clock; for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) { if (host->slot[i] && host->slot[i]->clock
&& host->slot[i]->clock < clock_min)
clock_min = host->slot[i]->clock;
}
/* * WRPROOF and RDPROOF prevent overruns/underruns by * stopping the clock when the FIFO is full/empty. * This state is not expected to last for long.
*/ if (host->caps.has_rwproof)
host->mode_reg |= (ATMCI_MR_WRPROOF | ATMCI_MR_RDPROOF);
if (host->caps.has_cfg_reg) { /* setup High Speed mode in relation with card capacity */ if (ios->timing == MMC_TIMING_SD_HS)
host->cfg_reg |= ATMCI_CFG_HSMODE; else
host->cfg_reg &= ~ATMCI_CFG_HSMODE;
}
if (list_empty(&host->queue)) {
atmci_writel(host, ATMCI_MR, host->mode_reg); if (host->caps.has_cfg_reg)
atmci_writel(host, ATMCI_CFG, host->cfg_reg);
} else {
host->need_clock_update = true;
}
/* * Update the MMC clock rate if necessary. This may be * necessary if set_ios() is called when a different slot is * busy transferring data.
*/ if (host->need_clock_update) {
atmci_writel(host, ATMCI_MR, host->mode_reg); if (host->caps.has_cfg_reg)
atmci_writel(host, ATMCI_CFG, host->cfg_reg);
}
/* * atmci_cleanup_slot() sets the ATMCI_SHUTDOWN flag before * freeing the interrupt. We must not re-enable the interrupt * if it has been freed, and if we're shutting down, it * doesn't really matter whether the card is present or not.
*/
smp_rmb(); if (test_bit(ATMCI_SHUTDOWN, &slot->flags)) return;
do {
prev_state = state;
dev_dbg(dev, "FSM: state=%d\n", state);
switch (state) { case STATE_IDLE: break;
case STATE_SENDING_CMD: /* * Command has been sent, we are waiting for command * ready. Then we have three next states possible: * END_REQUEST by default, WAITING_NOTBUSY if it's a * command needing it or DATA_XFER if there is data.
*/
dev_dbg(dev, "FSM: cmd ready?\n"); if (!atmci_test_and_clear_pending(host,
EVENT_CMD_RDY)) break;
dev_dbg(dev, "set completed cmd ready\n");
host->cmd = NULL;
atmci_set_completed(host, EVENT_CMD_RDY);
atmci_command_complete(host, mrq->cmd); if (mrq->data) {
dev_dbg(dev, "command with data transfer\n"); /* * If there is a command error don't start * data transfer.
*/ if (mrq->cmd->error) {
host->stop_transfer(host);
host->data = NULL;
atmci_writel(host, ATMCI_IDR,
ATMCI_TXRDY | ATMCI_RXRDY
| ATMCI_DATA_ERROR_FLAGS);
state = STATE_END_REQUEST;
} else
state = STATE_DATA_XFER;
} elseif ((!mrq->data) && (mrq->cmd->flags & MMC_RSP_BUSY)) {
dev_dbg(dev, "command response need waiting notbusy\n");
atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
state = STATE_WAITING_NOTBUSY;
} else
state = STATE_END_REQUEST;
break;
case STATE_DATA_XFER: if (atmci_test_and_clear_pending(host,
EVENT_DATA_ERROR)) {
dev_dbg(dev, "set completed data error\n");
atmci_set_completed(host, EVENT_DATA_ERROR);
state = STATE_END_REQUEST; break;
}
/* * A data transfer is in progress. The event expected * to move to the next state depends of data transfer * type (PDC or DMA). Once transfer done we can move * to the next step which is WAITING_NOTBUSY in write * case and directly SENDING_STOP in read case.
*/
dev_dbg(dev, "FSM: xfer complete?\n"); if (!atmci_test_and_clear_pending(host,
EVENT_XFER_COMPLETE)) break;
dev_dbg(dev, "(%s) set completed xfer complete\n", __func__);
atmci_set_completed(host, EVENT_XFER_COMPLETE);
case STATE_WAITING_NOTBUSY: /* * We can be in the state for two reasons: a command * requiring waiting not busy signal (stop command * included) or a write operation. In the latest case, * we need to send a stop command.
*/
dev_dbg(dev, "FSM: not busy?\n"); if (!atmci_test_and_clear_pending(host,
EVENT_NOTBUSY)) break;
dev_dbg(dev, "set completed not busy\n");
atmci_set_completed(host, EVENT_NOTBUSY);
if (host->data) { /* * For some commands such as CMD53, even if * there is data transfer, there is no stop * command to send.
*/ if (host->mrq->stop) {
atmci_send_stop_cmd(host, data);
state = STATE_SENDING_STOP;
} else {
host->data = NULL;
data->bytes_xfered = data->blocks
* data->blksz;
data->error = 0;
state = STATE_END_REQUEST;
}
} else
state = STATE_END_REQUEST; break;
case STATE_SENDING_STOP: /* * In this state, it is important to set host->data to * NULL (which is tested in the waiting notbusy state) * in order to go to the end request state instead of * sending stop again.
*/
dev_dbg(dev, "FSM: cmd ready?\n"); if (!atmci_test_and_clear_pending(host,
EVENT_CMD_RDY)) break;
if (pending & ATMCI_TXBUFE) {
dev_dbg(dev, "IRQ: tx buffer empty\n");
atmci_writel(host, ATMCI_IDR, ATMCI_TXBUFE);
atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX); /* * We can receive this interruption before having configured * the second pdc buffer, so we need to reconfigure first and * second buffers again
*/ if (host->data_size) {
atmci_pdc_set_both_buf(host, XFER_TRANSMIT);
atmci_writel(host, ATMCI_IER, ATMCI_ENDTX);
atmci_writel(host, ATMCI_IER, ATMCI_TXBUFE);
} else {
atmci_pdc_complete(host);
}
} elseif (pending & ATMCI_ENDTX) {
dev_dbg(dev, "IRQ: end of tx buffer\n");
atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX);
if (pending & ATMCI_RXBUFF) {
dev_dbg(dev, "IRQ: rx buffer full\n");
atmci_writel(host, ATMCI_IDR, ATMCI_RXBUFF);
atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX); /* * We can receive this interruption before having configured * the second pdc buffer, so we need to reconfigure first and * second buffers again
*/ if (host->data_size) {
atmci_pdc_set_both_buf(host, XFER_RECEIVE);
atmci_writel(host, ATMCI_IER, ATMCI_ENDRX);
atmci_writel(host, ATMCI_IER, ATMCI_RXBUFF);
} else {
atmci_pdc_complete(host);
}
} elseif (pending & ATMCI_ENDRX) {
dev_dbg(dev, "IRQ: end of rx buffer\n");
atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX);
/* * First mci IPs, so mainly the ones having pdc, have some * issues with the notbusy signal. You can't get it after * data transmission if you have not sent a stop command. * The appropriate workaround is to use the BLKE signal.
*/ if (pending & ATMCI_BLKE) {
dev_dbg(dev, "IRQ: blke\n");
atmci_writel(host, ATMCI_IDR, ATMCI_BLKE);
smp_wmb();
dev_dbg(dev, "set pending notbusy\n");
atmci_set_pending(host, EVENT_NOTBUSY);
queue_work(system_bh_wq, &host->bh_work);
}
/* * Disable interrupts until the pin has stabilized and check * the state then. Use mod_timer() since we may be in the * middle of the timer routine when this interrupt triggers.
*/
disable_irq_nosync(irq);
mod_timer(&slot->detect_timer, jiffies + msecs_to_jiffies(20));
mmc->ops = &atmci_ops;
mmc->f_min = DIV_ROUND_UP(host->bus_hz, 512);
mmc->f_max = host->bus_hz / 2;
mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; if (sdio_irq)
mmc->caps |= MMC_CAP_SDIO_IRQ; if (host->caps.has_highspeed)
mmc->caps |= MMC_CAP_SD_HIGHSPEED; /* * Without the read/write proof capability, it is strongly suggested to * use only one bit for data to prevent fifo underruns and overruns * which will corrupt data.
*/
/*
 * NOTE(review): the original source was truncated at this point by the
 * extraction tool ("maximum size reached"); the remainder of the file
 * (including the rest of the slot-init code) is missing and must be
 * recovered from the upstream driver.
 */