/* Copyright 2008 - 2016 Freescale Semiconductor, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Freescale Semiconductor nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * ALTERNATIVELY, this software may be distributed under the terms of the * GNU General Public License ("GPL") as published by the Free Software * Foundation, either version 2 of that License or (at your option) any * later version. * * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* * BTW, the drivers (and h/w programming model) already obtain the required * synchronisation for portal accesses and data-dependencies. Use of barrier()s * or other order-preserving primitives simply degrade performance. Hence the * use of the __raw_*() interfaces, which simply ensure that the compiler treats * the portal registers as volatile
*/
/*
 * Per-portal "Management Command" (MC) interface state: pointers into the
 * portal's command/result register windows (set up from the cache-enabled
 * mapping, see qm_mc_init elsewhere in this file), plus which of the two
 * result registers (RR0/RR1) is currently active and the expected valid-bit
 * polarity for the next command.
 */
struct qm_mc {
	union qm_mc_command *cr;	/* command register window */
	union qm_mc_result *rr;		/* base of the RR0/RR1 result pair */
	u8 rridx, vbit;			/* active RR index; expected valid-bit */
#ifdef CONFIG_FSL_DPAA_CHECKING
	/* Debug-only software state machine for the MC command lifecycle */
	enum {
		/* Can be _mc_start()ed */
		qman_mc_idle,
		/* Can be _mc_commit()ed or _mc_abort()ed */
		qman_mc_user,
		/* Can only be _mc_retry()ed */
		qman_mc_hw
	} state;
#endif
};
/* The two register windows a portal is mapped through. */
struct qm_addr {
	void *ce;		/* cache-enabled */
	__be32 *ce_be;		/* same value as above but for direct access */
	void __iomem *ci;	/* cache-inhibited */
};
struct qm_portal {
	/*
	 * In the non-CONFIG_FSL_DPAA_CHECKING case, the following stuff up to
	 * and including 'mc' fits within a cacheline (yay!). The 'config' part
	 * is setup-only, so isn't a cause for a concern. In other words, don't
	 * rearrange this structure on a whim, there be dragons ...
	 */
	struct qm_addr addr;	/* mapped register windows */
	struct qm_eqcr eqcr;	/* EQCR (enqueue) ring state */
	struct qm_dqrr dqrr;	/* DQRR (dequeue response) ring state */
	struct qm_mr mr;	/* MR (message ring) state */
	struct qm_mc mc;	/* management command interface state */
} ____cacheline_aligned;
/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */ staticstruct qm_eqcr_entry *eqcr_carryclear(struct qm_eqcr_entry *p)
{
uintptr_t addr = (uintptr_t)p;
addr &= ~EQCR_CARRY;
return (struct qm_eqcr_entry *)addr;
}
/*
 * Bit-wise logic to convert a ring pointer to a ring index.
 * The entry's position within the ring window, divided by the entry size
 * (2^EQCR_SHIFT), masked to the ring size.
 *
 * NOTE(review): fixed the fused token "staticint" (extraction artifact)
 * back to "static int".
 */
static int eqcr_ptr2idx(struct qm_eqcr_entry *e)
{
	return ((uintptr_t)e >> EQCR_SHIFT) & (QM_EQCR_SIZE - 1);
}
/* Increment the 'cursor' ring pointer, taking 'vbit' into account */ staticinlinevoid eqcr_inc(struct qm_eqcr *eqcr)
{ /* increment to the next EQCR pointer and handle overflow and 'vbit' */ struct qm_eqcr_entry *partial = eqcr->cursor + 1;
DPAA_ASSERT(dqrr->pmode == qm_dqrr_pvb); #ifndef CONFIG_FSL_PAMU /* * If PAMU is not available we need to invalidate the cache. * When PAMU is available the cache is updated by stash
*/
dpaa_invalidate_touch_ro(res); #endif if ((res->verb & QM_DQRR_VERB_VBIT) == dqrr->vbit) {
dqrr->pi = (dqrr->pi + 1) & (QM_DQRR_SIZE - 1); if (!dqrr->pi)
dqrr->vbit ^= QM_DQRR_VERB_VBIT;
dqrr->fill++;
}
}
staticinlinevoid qm_dqrr_cdc_consume_1ptr(struct qm_portal *portal, conststruct qm_dqrr_entry *dq, int park)
{
__maybe_unused struct qm_dqrr *dqrr = &portal->dqrr; int idx = dqrr_ptr2idx(dq);
mc->cr = portal->addr.ce + QM_CL_CR;
mc->rr = portal->addr.ce + QM_CL_RR0; /* * The expected valid bit polarity for the next CR command is 0 * if RR1 contains a valid response, and is 1 if RR0 contains a * valid response. If both RR contain all 0, this indicates either * that no command has been executed since reset (in which case the * expected valid bit polarity is 1)
*/
rr0 = mc->rr->verb;
rr1 = (mc->rr+1)->verb; if ((rr0 == 0 && rr1 == 0) || rr0 != 0)
mc->rridx = 1; else
mc->rridx = 0;
mc->vbit = mc->rridx ? QM_MCC_VERB_VBIT : 0; #ifdef CONFIG_FSL_DPAA_CHECKING
mc->state = qman_mc_idle; #endif return 0;
}
DPAA_ASSERT(mc->state == qman_mc_hw); /* * The inactive response register's verb byte always returns zero until * its command is submitted and completed. This includes the valid-bit, * in case you were wondering...
*/ if (!rr->verb) {
dpaa_invalidate_touch_ro(rr); return NULL;
}
mc->rridx ^= 1;
mc->vbit ^= QM_MCC_VERB_VBIT; #ifdef CONFIG_FSL_DPAA_CHECKING
mc->state = qman_mc_idle; #endif return rr;
}
staticinlineint qm_mc_result_timeout(struct qm_portal *portal, union qm_mc_result **mcr)
{ int timeout = QM_MCR_TIMEOUT;
do {
*mcr = qm_mc_result(portal); if (*mcr) break;
udelay(1);
} while (--timeout);
/*
 * Create the workqueue used for deferred QMan portal work.
 * Returns 0 on success, -ENOMEM if the workqueue could not be allocated.
 */
int qman_wq_alloc(void)
{
	qm_portal_wq = alloc_workqueue("qman_portal_wq", 0, 1);
	return qm_portal_wq ? 0 : -ENOMEM;
}
void qman_enable_irqs(void)
{ int i;
for (i = 0; i < num_possible_cpus(); i++) { if (affine_portals[i]) {
qm_out(&affine_portals[i]->p, QM_REG_ISR, 0xffffffff);
qm_out(&affine_portals[i]->p, QM_REG_IIR, 0);
}
}
}
/*
 * This is what everything can wait on, even if it migrates to a different cpu
 * to the one whose affine portal it is waiting on.
 * (Woken by, e.g., clear_vdqcr() when a volatile dequeue completes.)
 */
static DECLARE_WAIT_QUEUE_HEAD(affine_queue);
staticint drain_mr_fqrni(struct qm_portal *p)
{ constunion qm_mr_entry *msg;
loop:
qm_mr_pvb_update(p);
msg = qm_mr_current(p); if (!msg) { /* * if MR was full and h/w had other FQRNI entries to produce, we * need to allow it time to produce those entries once the * existing entries are consumed. A worst-case situation * (fully-loaded system) means h/w sequencers may have to do 3-4 * other things before servicing the portal's MR pump, each of * which (if slow) may take ~50 qman cycles (which is ~200 * processor cycles). So rounding up and then multiplying this * worst-case estimate by a factor of 10, just to be * ultra-paranoid, goes as high as 10,000 cycles. NB, we consume * one entry at a time, so h/w has an opportunity to produce new * entries well before the ring has been fully consumed, so * we're being *really* paranoid here.
*/
mdelay(1);
qm_mr_pvb_update(p);
msg = qm_mr_current(p); if (!msg) return 0;
} if ((msg->verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) { /* We aren't draining anything but FQRNIs */
pr_err("Found verb 0x%x in MR\n", msg->verb); return -1;
}
qm_mr_next(p);
qm_mr_cci_consume(p, 1); goto loop;
}
#ifdef CONFIG_FSL_PAMU /* PAMU is required for stashing */
portal->use_eqcr_ci_stashing = ((qman_ip_rev >= QMAN_REV30) ? 1 : 0); #else
portal->use_eqcr_ci_stashing = 0; #endif /* * prep the low-level portal struct with the mapped addresses from the * config, everything that follows depends on it and "config" is more * for (de)reference
*/
p->addr.ce = c->addr_virt_ce;
p->addr.ce_be = c->addr_virt_ce;
p->addr.ci = c->addr_virt_ci; /* * If CI-stashing is used, the current defaults use a threshold of 3, * and stash with high-than-DQRR priority.
*/ if (qm_eqcr_init(p, qm_eqcr_pvb,
portal->use_eqcr_ci_stashing ? 3 : 0, 1)) {
dev_err(c->dev, "EQCR initialisation failed\n"); goto fail_eqcr;
} if (qm_dqrr_init(p, c, qm_dqrr_dpush, qm_dqrr_pvb,
qm_dqrr_cdc, DQRR_MAXFILL)) {
dev_err(c->dev, "DQRR initialisation failed\n"); goto fail_dqrr;
} if (qm_mr_init(p, qm_mr_pvb, qm_mr_cci)) {
dev_err(c->dev, "MR initialisation failed\n"); goto fail_mr;
} if (qm_mc_init(p)) {
dev_err(c->dev, "MC initialisation failed\n"); goto fail_mc;
} /* static interrupt-gating controls */
qm_dqrr_set_ithresh(p, QMAN_PIRQ_DQRR_ITHRESH);
qm_mr_set_ithresh(p, QMAN_PIRQ_MR_ITHRESH);
qm_out(p, QM_REG_ITPR, QMAN_PIRQ_IPERIOD);
portal->cgrs = kmalloc_array(2, sizeof(*portal->cgrs), GFP_KERNEL); if (!portal->cgrs) goto fail_cgrs; /* initial snapshot is no-depletion */
qman_cgrs_init(&portal->cgrs[1]); if (cgrs)
portal->cgrs[0] = *cgrs; else /* if the given mask is NULL, assume all CGRs can be seen */
qman_cgrs_fill(&portal->cgrs[0]);
INIT_LIST_HEAD(&portal->cgr_cbs);
raw_spin_lock_init(&portal->cgr_lock);
INIT_WORK(&portal->congestion_work, qm_congestion_task);
INIT_WORK(&portal->mr_work, qm_mr_process_task);
portal->bits = 0;
portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 |
QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS |
QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED;
isdr = 0xffffffff;
qm_out(p, QM_REG_ISDR, isdr);
portal->irq_sources = 0;
qm_out(p, QM_REG_IER, 0);
snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
qm_out(p, QM_REG_IIR, 1); if (request_irq(c->irq, portal_isr, 0, portal->irqname, portal)) {
dev_err(c->dev, "request_irq() failed\n"); goto fail_irq;
}
if (dpaa_set_portal_irq_affinity(c->dev, c->irq, c->cpu)) goto fail_affinity;
/* Need EQCR to be empty before continuing */
isdr &= ~QM_PIRQ_EQCI;
qm_out(p, QM_REG_ISDR, isdr);
ret = qm_eqcr_get_fill(p); if (ret) {
dev_err(c->dev, "EQCR unclean\n"); goto fail_eqcr_empty;
}
isdr &= ~(QM_PIRQ_DQRI | QM_PIRQ_MRI);
qm_out(p, QM_REG_ISDR, isdr); if (qm_dqrr_current(p)) {
dev_dbg(c->dev, "DQRR unclean\n");
qm_dqrr_cdc_consume_n(p, 0xffff);
} if (qm_mr_current(p) && drain_mr_fqrni(p)) { /* special handling, drain just in case it's a few FQRNIs */ constunion qm_mr_entry *e = qm_mr_current(p);
/* Stop dequeues on the portal */
qm_dqrr_sdqcr_set(&qm->p, 0);
/* * NB we do this to "quiesce" EQCR. If we add enqueue-completions or * something related to QM_PIRQ_EQCI, this may need fixing. * Also, due to the prefetching model used for CI updates in the enqueue * path, this update will only invalidate the CI cacheline *after* * working on it, so we need to call this twice to ensure a full update * irrespective of where the enqueue processing was at when the teardown * began.
*/
qm_eqcr_cce_update(&qm->p);
qm_eqcr_cce_update(&qm->p);
pcfg = qm->config;
/*
 * remove some slowish-path stuff from the "fast path" and make sure it isn't
 * inlined.
 */
static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq)
{
	/* Release this portal's VDQCR ownership and clear the FQ's flag ... */
	p->vdqcr_owned = NULL;
	fq_clear(fq, QMAN_FQ_STATE_VDQCR);
	/*
	 * ... then wake anyone blocked in qman_volatile_dequeue(), which
	 * waits on affine_queue for QMAN_FQ_STATE_VDQCR to clear. The wake
	 * must come after the state updates above.
	 */
	wake_up(&affine_queue);
}
/* * The only states that would conflict with other things if they ran at the * same time on the same cpu are: * * (i) setting/clearing vdqcr_owned, and * (ii) clearing the NE (Not Empty) flag. * * Both are safe. Because; * * (i) this clearing can only occur after qman_volatile_dequeue() has set the * vdqcr_owned field (which it does before setting VDQCR), and * qman_volatile_dequeue() blocks interrupts and preemption while this is * done so that we can't interfere. * (ii) the NE flag is only cleared after qman_retire_fq() has set it, and as * with (i) that API prevents us from interfering until it's safe. * * The good thing is that qman_volatile_dequeue() and qman_retire_fq() run far * less frequently (ie. per-FQ) than __poll_portal_fast() does, so the nett * advantage comes from this function not having to "lock" anything at all. * * Note also that the callbacks are invoked at points which are safe against the * above potential conflicts, but that this function itself is not re-entrant * (this is because the function tracks one end of each FIFO in the portal and * we do *not* want to lock that). So the consequence is that it is safe for * user callbacks to call into any QMan API.
*/ staticinlineunsignedint __poll_portal_fast(struct qman_portal *p, unsignedint poll_limit, bool sched_napi)
{ conststruct qm_dqrr_entry *dq; struct qman_fq *fq; enum qman_cb_dqrr_result res; unsignedint limit = 0;
do {
qm_dqrr_pvb_update(&p->p);
dq = qm_dqrr_current(&p->p); if (!dq) break;
if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) { /* * VDQCR: don't trust context_b as the FQ may have * been configured for h/w consumption and we're * draining it post-retirement.
*/
fq = p->vdqcr_owned; /* * We only set QMAN_FQ_STATE_NE when retiring, so we * only need to check for clearing it when doing * volatile dequeues. It's one less thing to check * in the critical path (SDQCR).
*/ if (dq->stat & QM_DQRR_STAT_FQ_EMPTY)
fq_clear(fq, QMAN_FQ_STATE_NE); /* * This is duplicated from the SDQCR code, but we * have stuff to do before *and* after this callback, * and we don't want multiple if()s in the critical * path (SDQCR).
*/
res = fq->cb.dqrr(p, fq, dq, sched_napi); if (res == qman_cb_dqrr_stop) break; /* Check for VDQCR completion */ if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
clear_vdqcr(p, fq);
} else { /* SDQCR: context_b points to the FQ */
fq = tag_to_fq(be32_to_cpu(dq->context_b)); /* Now let the callback do its stuff */
res = fq->cb.dqrr(p, fq, dq, sched_napi); /* * The callback can request that we exit without * consuming this entry nor advancing;
*/ if (res == qman_cb_dqrr_stop) break;
} /* Interpret 'dq' from a driver perspective. */ /* * Parking isn't possible unless HELDACTIVE was set. NB, * FORCEELIGIBLE implies HELDACTIVE, so we only need to * check for HELDACTIVE to cover both.
*/
DPAA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
(res != qman_cb_dqrr_park)); /* just means "skip it, I'll consume it myself later on" */ if (res != qman_cb_dqrr_defer)
qm_dqrr_cdc_consume_1ptr(&p->p, dq,
res == qman_cb_dqrr_park); /* Move forward */
qm_dqrr_next(&p->p); /* * Entry processed and consumed, increment our counter. The * callback can request that we exit after consuming the * entry, and we also exit if we reach our processing limit, * so loop back only if neither of these conditions is met.
*/
} while (++limit < poll_limit && res != qman_cb_dqrr_consume_stop);
/* * Our interrupt handler only processes+clears status register bits that * are in p->irq_sources. As we're trimming that mask, if one of them * were to assert in the status register just before we remove it from * the enable register, there would be an interrupt-storm when we * release the IRQ lock. So we wait for the enable register update to * take effect in h/w (by reading it back) and then clear all other bits * in the status register. Ie. we clear them from ISR once it's certain * IER won't allow them to reassert.
*/
local_irq_save(irqflags);
bits &= QM_PIRQ_VISIBLE;
p->irq_sources &= ~bits;
qm_out(&p->p, QM_REG_IER, p->irq_sources);
ier = qm_in(&p->p, QM_REG_IER); /* * Using "~ier" (rather than "bits" or "~p->irq_sources") creates a * data-dependency, ie. to protect against re-ordering.
*/
qm_out(&p->p, QM_REG_ISR, ~ier);
local_irq_restore(irqflags);
}
EXPORT_SYMBOL(qman_p_irqsource_remove);
/*
 * Translate an MC result code into its symbolic name, for diagnostics.
 * Unknown/unlisted result codes map to an empty string.
 *
 * NOTE(review): fixed the fused tokens "staticconstchar" and 'return"..."'
 * (extraction artifacts); no behavioral change.
 */
static const char *mcr_result_str(u8 result)
{
	switch (result) {
	case QM_MCR_RESULT_NULL:
		return "QM_MCR_RESULT_NULL";
	case QM_MCR_RESULT_OK:
		return "QM_MCR_RESULT_OK";
	case QM_MCR_RESULT_ERR_FQID:
		return "QM_MCR_RESULT_ERR_FQID";
	case QM_MCR_RESULT_ERR_FQSTATE:
		return "QM_MCR_RESULT_ERR_FQSTATE";
	case QM_MCR_RESULT_ERR_NOTEMPTY:
		return "QM_MCR_RESULT_ERR_NOTEMPTY";
	case QM_MCR_RESULT_PENDING:
		return "QM_MCR_RESULT_PENDING";
	case QM_MCR_RESULT_ERR_BADCOMMAND:
		return "QM_MCR_RESULT_ERR_BADCOMMAND";
	}
	return "";
}
int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq)
{ if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) { int ret = qman_alloc_fqid(&fqid);
/* A context_b of 0 is allegedly special, so don't use that fqid */ if (fqid == 0 || fqid >= num_fqids) {
WARN(1, "bad fqid %d\n", fqid); return -EINVAL;
}
fq->idx = fqid * 2; if (flags & QMAN_FQ_FLAG_NO_MODIFY)
fq->idx++;
void qman_destroy_fq(struct qman_fq *fq)
{ /* * We don't need to lock the FQ as it is a pre-condition that the FQ be * quiesced. Instead, run some checks.
*/ switch (fq->state) { case qman_fq_state_parked: case qman_fq_state_oos: if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID))
qman_release_fqid(fq->fqid);
if (fq->state != qman_fq_state_oos &&
fq->state != qman_fq_state_parked) return -EINVAL; #ifdef CONFIG_FSL_DPAA_CHECKING if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)) return -EINVAL; #endif if (opts && (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_OAC)) { /* And can't be set at the same time as TDTHRESH */ if (be16_to_cpu(opts->we_mask) & QM_INITFQ_WE_TDTHRESH) return -EINVAL;
} /* Issue an INITFQ_[PARKED|SCHED] management command */
p = get_affine_portal(); if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
(fq->state != qman_fq_state_oos &&
fq->state != qman_fq_state_parked)) {
ret = -EBUSY; goto out;
}
mcc = qm_mc_start(&p->p); if (opts)
mcc->initfq = *opts;
qm_fqid_set(&mcc->fq, fq->fqid);
mcc->initfq.count = 0; /* * If the FQ does *not* have the TO_DCPORTAL flag, context_b is set as a * demux pointer. Otherwise, the caller-provided value is allowed to * stand, don't overwrite it.
*/ if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) {
dma_addr_t phys_fq;
mcc->initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTB);
mcc->initfq.fqd.context_b = cpu_to_be32(fq_to_tag(fq)); /* * and the physical address - NB, if the user wasn't trying to * set CONTEXTA, clear the stashing settings.
*/ if (!(be16_to_cpu(mcc->initfq.we_mask) &
QM_INITFQ_WE_CONTEXTA)) {
mcc->initfq.we_mask |=
cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
memset(&mcc->initfq.fqd.context_a, 0, sizeof(mcc->initfq.fqd.context_a));
} else { struct qman_portal *p = qman_dma_portal;
phys_fq = dma_map_single(p->config->dev, fq, sizeof(*fq), DMA_TO_DEVICE); if (dma_mapping_error(p->config->dev, phys_fq)) {
dev_err(p->config->dev, "dma_mapping failed\n");
ret = -EIO; goto out;
}
qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq);
}
} if (flags & QMAN_INITFQ_FLAG_LOCAL) { int wq = 0;
int qman_retire_fq(struct qman_fq *fq, u32 *flags)
{ union qm_mc_command *mcc; union qm_mc_result *mcr; struct qman_portal *p; int ret;
u8 res;
if (fq->state != qman_fq_state_parked &&
fq->state != qman_fq_state_sched) return -EINVAL; #ifdef CONFIG_FSL_DPAA_CHECKING if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)) return -EINVAL; #endif
p = get_affine_portal(); if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
fq->state == qman_fq_state_retired ||
fq->state == qman_fq_state_oos) {
ret = -EBUSY; goto out;
}
mcc = qm_mc_start(&p->p);
qm_fqid_set(&mcc->fq, fq->fqid);
qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE); if (!qm_mc_result_timeout(&p->p, &mcr)) {
dev_crit(p->config->dev, "ALTER_RETIRE timeout\n");
ret = -ETIMEDOUT; goto out;
}
DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_RETIRE);
res = mcr->result; /* * "Elegant" would be to treat OK/PENDING the same way; set CHANGING, * and defer the flags until FQRNI or FQRN (respectively) show up. But * "Friendly" is to process OK immediately, and not set CHANGING. We do * friendly, otherwise the caller doesn't necessarily have a fully * "retired" FQ on return even if the retirement was immediate. However * this does mean some code duplication between here and * fq_state_change().
*/ if (res == QM_MCR_RESULT_OK) {
ret = 0; /* Process 'fq' right away, we'll ignore FQRNI */ if (mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY)
fq_set(fq, QMAN_FQ_STATE_NE); if (mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)
fq_set(fq, QMAN_FQ_STATE_ORL); if (flags)
*flags = fq->flags;
fq->state = qman_fq_state_retired; if (fq->cb.fqs) { /* * Another issue with supporting "immediate" retirement * is that we're forced to drop FQRNIs, because by the * time they're seen it may already be "too late" (the * fq may have been OOS'd and free()'d already). But if * the upper layer wants a callback whether it's * immediate or not, we have to fake a "MR" entry to * look like an FQRNI...
*/ union qm_mr_entry msg;
int qman_oos_fq(struct qman_fq *fq)
{ union qm_mc_command *mcc; union qm_mc_result *mcr; struct qman_portal *p; int ret = 0;
if (fq->state != qman_fq_state_retired) return -EINVAL; #ifdef CONFIG_FSL_DPAA_CHECKING if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)) return -EINVAL; #endif
p = get_affine_portal(); if (fq_isset(fq, QMAN_FQ_STATE_BLOCKOOS) ||
fq->state != qman_fq_state_retired) {
ret = -EBUSY; goto out;
}
mcc = qm_mc_start(&p->p);
qm_fqid_set(&mcc->fq, fq->fqid);
qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS); if (!qm_mc_result_timeout(&p->p, &mcr)) {
ret = -ETIMEDOUT; goto out;
}
DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS); if (mcr->result != QM_MCR_RESULT_OK) {
ret = -EIO; goto out;
}
fq->state = qman_fq_state_oos;
out:
put_affine_portal(); return ret;
}
EXPORT_SYMBOL(qman_oos_fq);
int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd)
{ union qm_mc_command *mcc; union qm_mc_result *mcr; struct qman_portal *p = get_affine_portal(); int ret = 0;
mcc = qm_mc_start(&p->p);
qm_fqid_set(&mcc->fq, fq->fqid);
qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ); if (!qm_mc_result_timeout(&p->p, &mcr)) {
ret = -ETIMEDOUT; goto out;
}
int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np)
{ union qm_mc_command *mcc; union qm_mc_result *mcr; struct qman_portal *p = get_affine_portal(); int ret = 0;
mcc = qm_mc_start(&p->p);
qm_fqid_set(&mcc->fq, fq->fqid);
qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP); if (!qm_mc_result_timeout(&p->p, &mcr)) {
ret = -ETIMEDOUT; goto out;
}
DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP); if (mcr->result == QM_MCR_RESULT_OK)
*np = mcr->queryfq_np; elseif (mcr->result == QM_MCR_RESULT_ERR_FQID)
ret = -ERANGE; else
ret = -EIO;
out:
put_affine_portal(); return ret;
}
EXPORT_SYMBOL(qman_query_fq_np);
staticint qman_query_cgr(struct qman_cgr *cgr, struct qm_mcr_querycgr *cgrd)
{ union qm_mc_command *mcc; union qm_mc_result *mcr; struct qman_portal *p = get_affine_portal(); int ret = 0;
/* internal function used as a wait_event() expression */ staticint set_p_vdqcr(struct qman_portal *p, struct qman_fq *fq, u32 vdqcr)
{ unsignedlong irqflags; int ret = -EBUSY;
local_irq_save(irqflags); if (p->vdqcr_owned) goto out; if (fq_isset(fq, QMAN_FQ_STATE_VDQCR)) goto out;
/*
 * Block (on affine_queue) until set_vdqcr() succeeds for this FQ.
 * With QMAN_VOLATILE_FLAG_WAIT_INT the wait is interruptible and the
 * wait_event_interruptible() error (e.g. -ERESTARTSYS) is propagated;
 * otherwise the wait is uninterruptible and 0 is returned.
 *
 * NOTE(review): fixed the fused token "staticint" (extraction artifact)
 * back to "static int".
 */
static int wait_vdqcr_start(struct qman_portal **p, struct qman_fq *fq,
			    u32 vdqcr, u32 flags)
{
	int ret = 0;

	if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
		ret = wait_event_interruptible(affine_queue,
					       !set_vdqcr(p, fq, vdqcr));
	else
		wait_event(affine_queue, !set_vdqcr(p, fq, vdqcr));
	return ret;
}
/*
 * Issue a volatile dequeue command (VDQCR) for 'fq'.
 *
 * The FQ must be parked or retired, 'vdqcr' must not carry an FQID (it is
 * filled in here from fq->fqid), and the FQ must not already have a VDQCR
 * outstanding. With QMAN_VOLATILE_FLAG_WAIT, block until the portal's VDQCR
 * slot is free; with QMAN_VOLATILE_FLAG_FINISH, additionally block until the
 * volatile dequeue completes (QMAN_FQ_STATE_VDQCR cleared by the dequeue
 * processing, which then wakes affine_queue).
 *
 * Returns 0 on success, -EINVAL/-EBUSY for the precondition failures above,
 * or the error from the interruptible wait at start.
 */
int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr)
{
	struct qman_portal *p;
	int ret;

	if (fq->state != qman_fq_state_parked &&
	    fq->state != qman_fq_state_retired)
		return -EINVAL;
	if (vdqcr & QM_VDQCR_FQID_MASK)
		return -EINVAL;
	if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
		return -EBUSY;
	/* Merge the caller's VDQCR options with this FQ's id */
	vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
	if (flags & QMAN_VOLATILE_FLAG_WAIT)
		ret = wait_vdqcr_start(&p, fq, vdqcr, flags);
	else
		ret = set_vdqcr(&p, fq, vdqcr);
	if (ret)
		return ret;
	/* VDQCR is set */
	if (flags & QMAN_VOLATILE_FLAG_FINISH) {
		if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
			/*
			 * NB: don't propagate any error - the caller wouldn't
			 * know whether the VDQCR was issued or not. A signal
			 * could arrive after returning anyway, so the caller
			 * can check signal_pending() if that's an issue.
			 */
			wait_event_interruptible(affine_queue,
					!fq_isset(fq, QMAN_FQ_STATE_VDQCR));
		else
			wait_event(affine_queue,
				   !fq_isset(fq, QMAN_FQ_STATE_VDQCR));
	}
	return 0;
}
EXPORT_SYMBOL(qman_volatile_dequeue);
p = get_affine_portal();
local_irq_save(irqflags);
if (p->use_eqcr_ci_stashing) { /* * The stashing case is easy, only update if we need to in * order to try and liberate ring entries.
*/
eq = qm_eqcr_start_stash(&p->p);
} else { /* * The non-stashing case is harder, need to prefetch ahead of * time.
*/
avail = qm_eqcr_get_avail(&p->p); if (avail < 2)
update_eqcr_ci(p, avail);
eq = qm_eqcr_start_no_stash(&p->p);
}
/*
 * Initialise every CGR id (0..CGR_NUM-1) in h/w with default settings,
 * logging a single warning with the number of INIT commands that failed.
 */
void qman_init_cgr_all(void)
{
	struct qman_cgr cgr;
	int failures = 0;

	for (cgr.cgrid = 0; cgr.cgrid < CGR_NUM; cgr.cgrid++)
		if (qm_modify_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, NULL))
			failures++;

	if (failures)
		pr_err("Warning: %d error%s while initialising CGR h/w\n",
		       failures, failures > 1 ? "s" : "");
}
int qman_create_cgr(struct qman_cgr *cgr, u32 flags, struct qm_mcc_initcgr *opts)
{ struct qm_mcr_querycgr cgr_state; int ret; struct qman_portal *p;
/* * We have to check that the provided CGRID is within the limits of the * data-structures, for obvious reasons. However we'll let h/w take * care of determining whether it's within the limits of what exists on * the SoC.
*/ if (cgr->cgrid >= CGR_NUM) return -EINVAL;
--> --------------------
--> maximum size reached
--> --------------------
Messung V0.5
¤ Dauer der Verarbeitung: 0.30 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.