qbman_write_register(p, QBMAN_CINH_SWP_CFG, reg);
reg = qbman_read_register(p, QBMAN_CINH_SWP_CFG); if (!reg) {
pr_err("qbman: the portal is not enabled!\n");
kfree(p); return NULL;
}
if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) {
qbman_write_register(p, QBMAN_CINH_SWP_EQCR_PI, QMAN_RT_MODE);
qbman_write_register(p, QBMAN_CINH_SWP_RCR_PI, QMAN_RT_MODE);
} /* * SDQCR needs to be initialized to 0 when no channels are * being dequeued from or else the QMan HW will indicate an * error. The values that were calculated above will be * applied when dequeues from a specific channel are enabled.
*/
qbman_write_register(p, QBMAN_CINH_SWP_SDQCR, 0);
/* Initialize the software portal with a irq timeout period of 0us */
qbman_swp_set_irq_coalescing(p, p->dqrr.dqrr_size - 1, 0);
return p;
}
/**
 * qbman_swp_finish() - Destroy the functional object representing the
 *	given QBMan portal descriptor (counterpart of portal init).
 * @p: the qbman_swp object to be destroyed
 *
 * Frees only the software object; portal registers are not touched here.
 */
void qbman_swp_finish(struct qbman_swp *p)
{
	kfree(p);
}
/**
 * qbman_swp_interrupt_read_status() - Read the portal interrupt status
 * @p: the given software portal
 *
 * Return the value in the SWP_ISR register.
 */
u32 qbman_swp_interrupt_read_status(struct qbman_swp *p)
{
	return qbman_read_register(p, QBMAN_CINH_SWP_ISR);
}
/**
 * qbman_swp_interrupt_clear_status() - Clear interrupt status bits
 * @p: the given software portal
 * @mask: The mask to clear in SWP_ISR register
 *
 * SWP_ISR is write-one-to-clear; only the bits set in @mask are cleared.
 */
void qbman_swp_interrupt_clear_status(struct qbman_swp *p, u32 mask)
{
	qbman_write_register(p, QBMAN_CINH_SWP_ISR, mask);
}
/**
 * qbman_swp_interrupt_get_trigger() - read interrupt enable register
 * @p: the given software portal
 *
 * Return the value in the SWP_IER register.
 */
u32 qbman_swp_interrupt_get_trigger(struct qbman_swp *p)
{
	return qbman_read_register(p, QBMAN_CINH_SWP_IER);
}
/**
 * qbman_swp_interrupt_set_trigger() - enable interrupts for a swp
 * @p: the given software portal
 * @mask: The mask of bits to enable in SWP_IER
 */
void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, u32 mask)
{
	qbman_write_register(p, QBMAN_CINH_SWP_IER, mask);
}
/**
 * qbman_swp_interrupt_get_inhibit() - read interrupt mask register
 * @p: the given software portal object
 *
 * Return the value in the SWP_IIR register.
 */
int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p)
{
	return qbman_read_register(p, QBMAN_CINH_SWP_IIR);
}
/** * qbman_swp_interrupt_set_inhibit() - write interrupt mask register * @p: the given software portal object * @inhibit: whether to inhibit the IRQs
*/ void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
{
qbman_write_register(p, QBMAN_CINH_SWP_IIR, inhibit ? 0xffffffff : 0);
}
/* * Different management commands all use this common base layer of code to issue * commands and poll for results.
*/
/* * Returns a pointer to where the caller should fill in their management command * (caller should ignore the verb byte)
*/ void *qbman_swp_mc_start(struct qbman_swp *p)
{ if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) return qbman_get_cmd(p, QBMAN_CENA_SWP_CR); else return qbman_get_cmd(p, QBMAN_CENA_SWP_CR_MEM);
}
/*
 * Commits merges in the caller-supplied command verb (which should not include
 * the valid-bit) and submits the command to hardware
 */
void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb)
{
	u8 *v = cmd;

	/*
	 * NOTE(review): this function's body was truncated in this copy
	 * (it ended after the declaration of 'v', leaving cmd_verb unused
	 * and the brace unclosed). Restored per the upstream driver —
	 * verify against mainline.
	 */
	if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
		/* Order the command body before the verb that hands it to HW */
		dma_wmb();
		*v = cmd_verb | p->mc.valid_bit;
	} else {
		*v = cmd_verb | p->mc.valid_bit;
		dma_wmb();
		/* Memory-backed portals need an explicit doorbell write */
		qbman_write_register(p, QBMAN_CINH_SWP_CR_RT, QMAN_RT_MODE);
	}
}
/*
 * Checks for a completed response (returns non-NULL only if the response
 * is complete).
 */
void *qbman_swp_mc_result(struct qbman_swp *p)
{
	u32 *ret, verb;

	if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
		ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
		/* Remove the valid-bit - command completed if the rest
		 * is non-zero.
		 */
		verb = ret[0] & ~QB_VALID_BIT;
		if (!verb)
			return NULL;
		p->mc.valid_bit ^= QB_VALID_BIT;
	} else {
		ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR_MEM);
		/* Command completed if the valid bit is toggled */
		if (p->mr.valid_bit != (ret[0] & QB_VALID_BIT))
			return NULL;
		/* Command completed if the rest is non-zero */
		verb = ret[0] & ~QB_VALID_BIT;
		if (!verb)
			return NULL;
		p->mr.valid_bit ^= QB_VALID_BIT;
	}

	/* NOTE(review): the trailing return and closing brace were missing
	 * in this copy; restored so the function yields the response pointer.
	 */
	return ret;
}
/*
 * qbman_eq_desc_clear() - Clear the contents of a descriptor to
 * default/starting state.
 */
void qbman_eq_desc_clear(struct qbman_eq_desc *d)
{
	memset(d, 0, sizeof(*d));
}
/** * qbman_eq_desc_set_no_orp() - Set enqueue descriptor without orp * @d: the enqueue descriptor. * @respond_success: 1 = enqueue with response always; 0 = enqueue with * rejections returned on a FQ.
*/ void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success)
{
d->verb &= ~(1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT); if (respond_success)
d->verb |= enqueue_response_always; else
d->verb |= enqueue_rejects_to_fq;
}
/*
 * Exactly one of the following descriptor "targets" should be set. (Calling any
 * one of these will replace the effect of any prior call to one of these.)
 * -enqueue to a frame queue
 * -enqueue to a queuing destination
 */

/**
 * qbman_eq_desc_set_fq() - set the FQ for the enqueue command
 * @d: the enqueue descriptor
 * @fqid: the id of the frame queue to be enqueued
 */
void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, u32 fqid)
{
	/* Target-type bit cleared selects FQ (undoes a prior set_qd()) */
	d->verb &= ~(1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT);
	d->tgtid = cpu_to_le32(fqid);
}
/**
 * qbman_eq_desc_set_qd() - Set Queuing Destination for the enqueue command
 * @d: the enqueue descriptor
 * @qdid: the id of the queuing destination to be enqueued
 * @qd_bin: the queuing destination bin
 * @qd_prio: the queuing destination priority
 */
void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
			  u32 qd_bin, u32 qd_prio)
{
	/* Target-type bit set selects QD (undoes a prior set_fq()) */
	d->verb |= 1 << QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT;
	d->tgtid = cpu_to_le32(qdid);
	d->qdbin = cpu_to_le16(qd_bin);
	d->qpri = qd_prio;
}
#define QB_RT_BIT ((u32)0x100) /** * qbman_swp_enqueue_direct() - Issue an enqueue command * @s: the software portal used for enqueue * @d: the enqueue descriptor * @fd: the frame descriptor to be enqueued * * Please note that 'fd' should only be NULL if the "action" of the * descriptor is "orp_hole" or "orp_nesn". * * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
*/ static int qbman_swp_enqueue_direct(struct qbman_swp *s, conststruct qbman_eq_desc *d, conststruct dpaa2_fd *fd)
{ int flags = 0; int ret = qbman_swp_enqueue_multiple_direct(s, d, fd, &flags, 1);
if (ret >= 0)
ret = 0; else
ret = -EBUSY; return ret;
}
/** * qbman_swp_enqueue_mem_back() - Issue an enqueue command * @s: the software portal used for enqueue * @d: the enqueue descriptor * @fd: the frame descriptor to be enqueued * * Please note that 'fd' should only be NULL if the "action" of the * descriptor is "orp_hole" or "orp_nesn". * * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
*/ static int qbman_swp_enqueue_mem_back(struct qbman_swp *s, conststruct qbman_eq_desc *d, conststruct dpaa2_fd *fd)
{ int flags = 0; int ret = qbman_swp_enqueue_multiple_mem_back(s, d, fd, &flags, 1);
if (ret >= 0)
ret = 0; else
ret = -EBUSY; return ret;
}
/**
 * qbman_swp_enqueue_multiple_direct() - Issue a multi enqueue command
 * using one enqueue descriptor
 * @s:  the software portal used for enqueue
 * @d:  the enqueue descriptor
 * @fd: table pointer of frame descriptor table to be enqueued
 * @flags: table pointer of QBMAN_ENQUEUE_FLAG_DCA flags, not used if NULL
 * @num_frames: number of fd to be enqueued
 *
 * Return the number of fd enqueued, or a negative error number.
 */
static int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
					     const struct qbman_eq_desc *d,
					     const struct dpaa2_fd *fd,
					     uint32_t *flags,
					     int num_frames)
{
	uint32_t *p = NULL;
	const uint32_t *cl = (uint32_t *)d;
	uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
	int i, num_enqueued = 0;

	/*
	 * NOTE(review): this copy was truncated — the lock acquisition,
	 * ring-mask setup, EQCR consumer-index refill and the DCA branch
	 * body were missing (half_mask/full_mask were read uninitialized
	 * and spin_unlock() had no matching lock). Restored per the
	 * upstream driver; verify against mainline.
	 */
	spin_lock(&s->access_spinlock);
	half_mask = (s->eqcr.pi_ci_mask >> 1);
	full_mask = s->eqcr.pi_ci_mask;

	if (!s->eqcr.available) {
		/* Re-read the consumer index to discover freed entries */
		eqcr_ci = s->eqcr.ci;
		s->eqcr.ci = qbman_read_register(s, QBMAN_CINH_SWP_EQCR_CI);
		s->eqcr.ci &= full_mask;
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
						eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available) {
			spin_unlock(&s->access_spinlock);
			return 0;
		}
	}

	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
			s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring */
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		/* Skip copying the verb */
		memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
		memcpy(&p[EQ_DESC_SIZE_FD_START / sizeof(uint32_t)],
		       &fd[i], sizeof(*fd));
		eqcr_pi++;
	}

	dma_wmb();

	/* Set the verb byte, have to substitute in the valid-bit */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		p[0] = cl[0] | s->eqcr.pi_vb;
		if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
			struct qbman_eq_desc *eq_desc = (struct qbman_eq_desc *)p;

			eq_desc->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
				((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
		}
		eqcr_pi++;
		if (!(eqcr_pi & half_mask))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	}

	/* Flush all the cacheline without load/store in between */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++)
		eqcr_pi++;
	s->eqcr.pi = eqcr_pi & full_mask;
	spin_unlock(&s->access_spinlock);

	return num_enqueued;
}
/**
 * qbman_swp_enqueue_multiple_mem_back() - Issue a multi enqueue command
 * using one enqueue descriptor
 * @s:  the software portal used for enqueue
 * @d:  the enqueue descriptor
 * @fd: table pointer of frame descriptor table to be enqueued
 * @flags: table pointer of QBMAN_ENQUEUE_FLAG_DCA flags, not used if NULL
 * @num_frames: number of fd to be enqueued
 *
 * Return the number of fd enqueued, or a negative error number.
 */
static int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
					       const struct qbman_eq_desc *d,
					       const struct dpaa2_fd *fd,
					       uint32_t *flags,
					       int num_frames)
{
	uint32_t *p = NULL;
	const uint32_t *cl = (uint32_t *)(d);
	uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
	int i, num_enqueued = 0;
	unsigned long irq_flags;

	/*
	 * NOTE(review): this copy was truncated — the irqsave locking,
	 * ring-mask setup, EQCR refill, DCA body and the whole tail
	 * (producer-index update, doorbell, unlock, return) were missing.
	 * Restored per the upstream driver; verify against mainline.
	 */
	spin_lock_irqsave(&s->access_spinlock, irq_flags);

	half_mask = (s->eqcr.pi_ci_mask >> 1);
	full_mask = s->eqcr.pi_ci_mask;
	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		s->eqcr.ci = qbman_read_register(s, QBMAN_CINH_SWP_EQCR_CI);
		s->eqcr.ci &= full_mask;
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
						eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available) {
			spin_unlock_irqrestore(&s->access_spinlock, irq_flags);
			return 0;
		}
	}

	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
			s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring */
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		/* Skip copying the verb */
		memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
		memcpy(&p[EQ_DESC_SIZE_FD_START / sizeof(uint32_t)],
		       &fd[i], sizeof(*fd));
		eqcr_pi++;
	}

	/* Set the verb byte, have to substitute in the valid-bit */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		p[0] = cl[0] | s->eqcr.pi_vb;
		if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
			struct qbman_eq_desc *eq_desc = (struct qbman_eq_desc *)p;

			eq_desc->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
				((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
		}
		eqcr_pi++;
		if (!(eqcr_pi & half_mask))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	}
	s->eqcr.pi = eqcr_pi & full_mask;

	dma_wmb();
	qbman_write_register(s, QBMAN_CINH_SWP_EQCR_PI,
			     (QB_RT_BIT) | (s->eqcr.pi) | s->eqcr.pi_vb);
	spin_unlock_irqrestore(&s->access_spinlock, irq_flags);

	return num_enqueued;
}
/** * qbman_swp_enqueue_multiple_desc_direct() - Issue a multi enqueue command * using multiple enqueue descriptor * @s: the software portal used for enqueue * @d: table of minimal enqueue descriptor * @fd: table pointer of frame descriptor table to be enqueued * @num_frames: number of fd to be enqueued * * Return the number of fd enqueued, or a negative error number.
*/ static int qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s, conststruct qbman_eq_desc *d, conststruct dpaa2_fd *fd, int num_frames)
{
uint32_t *p; const uint32_t *cl;
uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask; int i, num_enqueued = 0;
eqcr_pi = s->eqcr.pi;
num_enqueued = (s->eqcr.available < num_frames) ?
s->eqcr.available : num_frames;
s->eqcr.available -= num_enqueued; /* Fill in the EQCR ring */ for (i = 0; i < num_enqueued; i++) {
p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
cl = (uint32_t *)(&d[i]); /* Skip copying the verb */
memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
&fd[i], sizeof(*fd));
eqcr_pi++;
}
dma_wmb();
/* Set the verb byte, have to substitute in the valid-bit */
eqcr_pi = s->eqcr.pi; for (i = 0; i < num_enqueued; i++) {
p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
cl = (uint32_t *)(&d[i]);
p[0] = cl[0] | s->eqcr.pi_vb;
eqcr_pi++; if (!(eqcr_pi & half_mask))
s->eqcr.pi_vb ^= QB_VALID_BIT;
}
/* Flush all the cacheline without load/store in between */
eqcr_pi = s->eqcr.pi; for (i = 0; i < num_enqueued; i++)
eqcr_pi++;
s->eqcr.pi = eqcr_pi & full_mask;
return num_enqueued;
}
/** * qbman_swp_enqueue_multiple_desc_mem_back() - Issue a multi enqueue command * using multiple enqueue descriptor * @s: the software portal used for enqueue * @d: table of minimal enqueue descriptor * @fd: table pointer of frame descriptor table to be enqueued * @num_frames: number of fd to be enqueued * * Return the number of fd enqueued, or a negative error number.
*/ static int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s, conststruct qbman_eq_desc *d, conststruct dpaa2_fd *fd, int num_frames)
{
uint32_t *p; const uint32_t *cl;
uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask; int i, num_enqueued = 0;
eqcr_pi = s->eqcr.pi;
num_enqueued = (s->eqcr.available < num_frames) ?
s->eqcr.available : num_frames;
s->eqcr.available -= num_enqueued; /* Fill in the EQCR ring */ for (i = 0; i < num_enqueued; i++) {
p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
cl = (uint32_t *)(&d[i]); /* Skip copying the verb */
memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
&fd[i], sizeof(*fd));
eqcr_pi++;
}
/* Set the verb byte, have to substitute in the valid-bit */
eqcr_pi = s->eqcr.pi; for (i = 0; i < num_enqueued; i++) {
p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
cl = (uint32_t *)(&d[i]);
p[0] = cl[0] | s->eqcr.pi_vb;
eqcr_pi++; if (!(eqcr_pi & half_mask))
s->eqcr.pi_vb ^= QB_VALID_BIT;
}
/**
 * qbman_swp_push_get() - Get the push dequeue setup
 * @s: the software portal object
 * @channel_idx: the channel index to query
 * @enabled: returned boolean to show whether the push dequeue is enabled
 *	for the given channel
 */
void qbman_swp_push_get(struct qbman_swp *s, u8 channel_idx, int *enabled)
{
	u16 src = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;

	/*
	 * NOTE(review): this copy fused qbman_swp_push_get()'s header with
	 * qbman_swp_push_set()'s body ('dqsrc' was used undeclared).
	 * Both functions restored per the upstream driver; verify
	 * against mainline.
	 */
	WARN_ON(channel_idx > 15);
	*enabled = src | (1 << channel_idx);
}

/**
 * qbman_swp_push_set() - Enable or disable push dequeue
 * @s: the software portal object
 * @channel_idx: the channel index (0 to 15)
 * @enable: enable or disable push dequeue
 */
void qbman_swp_push_set(struct qbman_swp *s, u8 channel_idx, int enable)
{
	u16 dqsrc;

	WARN_ON(channel_idx > 15);
	if (enable)
		s->sdq |= 1 << (QB_SDQCR_SRC_SHIFT + channel_idx);
	else
		s->sdq &= ~(1 << (QB_SDQCR_SRC_SHIFT + channel_idx));

	/* Read make the complete src map. If no channels are enabled
	 * the SDQCR must be 0 or else QMan will assert errors
	 */
	dqsrc = (s->sdq >> QB_SDQCR_SRC_SHIFT) & QB_SDQCR_SRC_MASK;
	if (dqsrc != 0)
		qbman_write_register(s, QBMAN_CINH_SWP_SDQCR, s->sdq);
	else
		qbman_write_register(s, QBMAN_CINH_SWP_SDQCR, 0);
}
/**
 * qbman_pull_desc_clear() - Clear the contents of a descriptor to
 *	default/starting state
 * @d: the pull dequeue descriptor to be cleared
 */
void qbman_pull_desc_clear(struct qbman_pull_desc *d)
{
	memset(d, 0, sizeof(*d));
}
/**
 * qbman_pull_desc_set_storage()- Set the pull dequeue storage
 * @d: the pull dequeue descriptor to be set
 * @storage: the pointer of the memory to store the dequeue result
 * @storage_phys: the physical address of the storage memory
 * @stash: to indicate whether write allocate is enabled
 *
 * If not called, or if called with 'storage' as NULL, the result pull dequeues
 * will produce results to DQRR. If 'storage' is non-NULL, then results are
 * produced to the given memory location (using the DMA address which
 * the caller provides in 'storage_phys'), and 'stash' controls whether or not
 * those writes to main-memory express a cache-warming attribute.
 */
void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
				 struct dpaa2_dq *storage,
				 dma_addr_t storage_phys,
				 int stash)
{
	/* save the virtual address */
	d->rsp_addr_virt = (u64)(uintptr_t)storage;

	/*
	 * NOTE(review): this copy was truncated after the line above,
	 * leaving @storage_phys and @stash unused. The RLS/WAE verb
	 * handling below is restored per the upstream driver; verify
	 * against mainline.
	 */
	if (!storage) {
		d->verb &= ~(1 << QB_VDQCR_VERB_RLS_SHIFT);
		return;
	}
	d->verb |= 1 << QB_VDQCR_VERB_RLS_SHIFT;
	if (stash)
		d->verb |= 1 << QB_VDQCR_VERB_WAE_SHIFT;
	else
		d->verb &= ~(1 << QB_VDQCR_VERB_WAE_SHIFT);

	d->rsp_addr = cpu_to_le64(storage_phys);
}
/**
 * qbman_pull_desc_set_numframes() - Set the number of frames to be dequeued
 * @d: the pull dequeue descriptor to be set
 * @numframes: number of frames to be set, must be between 1 and 16, inclusive
 */
void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, u8 numframes)
{
	/* Hardware encodes the count as (n - 1) */
	d->numf = numframes - 1;
}
/*
 * Exactly one of the following descriptor "actions" should be set. (Calling any
 * one of these will replace the effect of any prior call to one of these.)
 * - pull dequeue from the given frame queue (FQ)
 * - pull dequeue from any FQ in the given work queue (WQ)
 * - pull dequeue from any FQ in any WQ in the given channel
 */

/**
 * qbman_pull_desc_set_fq() - Set fqid from which the dequeue command dequeues
 * @d: the pull dequeue descriptor to be set
 * @fqid: the frame queue index of the given FQ
 */
void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, u32 fqid)
{
	d->verb |= 1 << QB_VDQCR_VERB_DCT_SHIFT;
	d->verb |= qb_pull_dt_framequeue << QB_VDQCR_VERB_DT_SHIFT;
	d->dq_src = cpu_to_le32(fqid);
}
/**
 * qbman_pull_desc_set_wq() - Set wqid from which the dequeue command dequeues
 * @d: the pull dequeue descriptor to be set
 * @wqid: composed of channel id and wqid within the channel
 * @dct: the dequeue command type
 */
void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, u32 wqid,
			    enum qbman_pull_type_e dct)
{
	d->verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
	d->verb |= qb_pull_dt_workqueue << QB_VDQCR_VERB_DT_SHIFT;
	d->dq_src = cpu_to_le32(wqid);
}
/**
 * qbman_pull_desc_set_channel() - Set channelid from which the dequeue command
 *	dequeues
 * @d: the pull dequeue descriptor to be set
 * @chid: the channel id to be dequeued
 * @dct: the dequeue command type
 */
void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid,
				 enum qbman_pull_type_e dct)
{
	d->verb |= dct << QB_VDQCR_VERB_DCT_SHIFT;
	d->verb |= qb_pull_dt_channel << QB_VDQCR_VERB_DT_SHIFT;
	d->dq_src = cpu_to_le32(chid);
}
/**
 * qbman_swp_pull_direct() - Issue the pull dequeue command
 * @s: the software portal object
 * @d: the software portal descriptor which has been configured with
 *	the set of qbman_pull_desc_set_*() calls
 *
 * Return 0 for success, and -EBUSY if the software portal is not ready
 * to do pull dequeue.
 */
static int qbman_swp_pull_direct(struct qbman_swp *s, struct qbman_pull_desc *d)
{
	struct qbman_pull_desc *p;

	/* Only one volatile dequeue may be outstanding at a time */
	if (!atomic_dec_and_test(&s->vdq.available)) {
		atomic_inc(&s->vdq.available);
		return -EBUSY;
	}
	/* Remember where the results land so the response path can match
	 * them up (see qbman_result_has_new_result()).
	 */
	s->vdq.storage = (void *)(uintptr_t)d->rsp_addr_virt;
	if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
		p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR);
	else
		p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR_MEM);
	p->numf = d->numf;
	p->tok = QMAN_DQ_TOKEN_VALID;
	p->dq_src = d->dq_src;
	p->rsp_addr = d->rsp_addr;
	p->rsp_addr_virt = d->rsp_addr_virt;
	dma_wmb();
	/* Set the verb byte, have to substitute in the valid-bit */
	p->verb = d->verb | s->vdq.valid_bit;
	s->vdq.valid_bit ^= QB_VALID_BIT;

	return 0;
}
/** * qbman_swp_pull_mem_back() - Issue the pull dequeue command * @s: the software portal object * @d: the software portal descriptor which has been configured with * the set of qbman_pull_desc_set_*() calls * * Return 0 for success, and -EBUSY if the software portal is not ready * to do pull dequeue.
*/ static int qbman_swp_pull_mem_back(struct qbman_swp *s, struct qbman_pull_desc *d)
{ struct qbman_pull_desc *p;
/* Set the verb byte, have to substitute in the valid-bit */
p->verb = d->verb | s->vdq.valid_bit;
s->vdq.valid_bit ^= QB_VALID_BIT;
dma_wmb();
qbman_write_register(s, QBMAN_CINH_SWP_VDQCR_RT, QMAN_RT_MODE);
return 0;
}
#define QMAN_DQRR_PI_MASK 0xf
/** * qbman_swp_dqrr_next_direct() - Get an valid DQRR entry * @s: the software portal object * * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry * only once, so repeated calls can return a sequence of DQRR entries, without * requiring they be consumed immediately or in any particular order.
*/ conststruct dpaa2_dq *qbman_swp_dqrr_next_direct(struct qbman_swp *s)
{
u32 verb;
u32 response_verb;
u32 flags; struct dpaa2_dq *p;
/* Before using valid-bit to detect if something is there, we have to * handle the case of the DQRR reset bug...
*/ if (unlikely(s->dqrr.reset_bug)) { /* * We pick up new entries by cache-inhibited producer index, * which means that a non-coherent mapping would require us to * invalidate and read *only* once that PI has indicated that * there's an entry here. The first trip around the DQRR ring * will be much less efficient than all subsequent trips around * it...
*/
u8 pi = qbman_read_register(s, QBMAN_CINH_SWP_DQPI) &
QMAN_DQRR_PI_MASK;
/* there are new entries if pi != next_idx */ if (pi == s->dqrr.next_idx) return NULL;
/* * if next_idx is/was the last ring index, and 'pi' is * different, we can disable the workaround as all the ring * entries have now been DMA'd to so valid-bit checking is * repaired. Note: this logic needs to be based on next_idx * (which increments one at a time), rather than on pi (which * can burst and wrap-around between our snapshots of it).
*/ if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1)) {
pr_debug("next_idx=%d, pi=%d, clear reset bug\n",
s->dqrr.next_idx, pi);
s->dqrr.reset_bug = 0;
}
prefetch(qbman_get_cmd(s,
QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
}
p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
verb = p->dq.verb;
/* * If the valid-bit isn't of the expected polarity, nothing there. Note, * in the DQRR reset bug workaround, we shouldn't need to skip these * check, because we've already determined that a new entry is available * and we've invalidated the cacheline before reading it, so the * valid-bit behaviour is repaired and should tell us what we already * knew from reading PI.
*/ if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) {
prefetch(qbman_get_cmd(s,
QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx))); return NULL;
} /* * There's something there. Move "next_idx" attention to the next ring * entry (and prefetch it) before returning what we found.
*/
s->dqrr.next_idx++;
s->dqrr.next_idx &= s->dqrr.dqrr_size - 1; /* Wrap around */ if (!s->dqrr.next_idx)
s->dqrr.valid_bit ^= QB_VALID_BIT;
/* * If this is the final response to a volatile dequeue command * indicate that the vdq is available
*/
flags = p->dq.stat;
response_verb = verb & QBMAN_RESULT_MASK; if ((response_verb == QBMAN_RESULT_DQ) &&
(flags & DPAA2_DQ_STAT_VOLATILE) &&
(flags & DPAA2_DQ_STAT_EXPIRED))
atomic_inc(&s->vdq.available);
/** * qbman_swp_dqrr_next_mem_back() - Get an valid DQRR entry * @s: the software portal object * * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry * only once, so repeated calls can return a sequence of DQRR entries, without * requiring they be consumed immediately or in any particular order.
*/ conststruct dpaa2_dq *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s)
{
u32 verb;
u32 response_verb;
u32 flags; struct dpaa2_dq *p;
/* Before using valid-bit to detect if something is there, we have to * handle the case of the DQRR reset bug...
*/ if (unlikely(s->dqrr.reset_bug)) { /* * We pick up new entries by cache-inhibited producer index, * which means that a non-coherent mapping would require us to * invalidate and read *only* once that PI has indicated that * there's an entry here. The first trip around the DQRR ring * will be much less efficient than all subsequent trips around * it...
*/
u8 pi = qbman_read_register(s, QBMAN_CINH_SWP_DQPI) &
QMAN_DQRR_PI_MASK;
/* there are new entries if pi != next_idx */ if (pi == s->dqrr.next_idx) return NULL;
/* * if next_idx is/was the last ring index, and 'pi' is * different, we can disable the workaround as all the ring * entries have now been DMA'd to so valid-bit checking is * repaired. Note: this logic needs to be based on next_idx * (which increments one at a time), rather than on pi (which * can burst and wrap-around between our snapshots of it).
*/ if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1)) {
pr_debug("next_idx=%d, pi=%d, clear reset bug\n",
s->dqrr.next_idx, pi);
s->dqrr.reset_bug = 0;
}
prefetch(qbman_get_cmd(s,
QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
}
p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx));
verb = p->dq.verb;
/* * If the valid-bit isn't of the expected polarity, nothing there. Note, * in the DQRR reset bug workaround, we shouldn't need to skip these * check, because we've already determined that a new entry is available * and we've invalidated the cacheline before reading it, so the * valid-bit behaviour is repaired and should tell us what we already * knew from reading PI.
*/ if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) {
prefetch(qbman_get_cmd(s,
QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx))); return NULL;
} /* * There's something there. Move "next_idx" attention to the next ring * entry (and prefetch it) before returning what we found.
*/
s->dqrr.next_idx++;
s->dqrr.next_idx &= s->dqrr.dqrr_size - 1; /* Wrap around */ if (!s->dqrr.next_idx)
s->dqrr.valid_bit ^= QB_VALID_BIT;
/* * If this is the final response to a volatile dequeue command * indicate that the vdq is available
*/
flags = p->dq.stat;
response_verb = verb & QBMAN_RESULT_MASK; if ((response_verb == QBMAN_RESULT_DQ) &&
(flags & DPAA2_DQ_STAT_VOLATILE) &&
(flags & DPAA2_DQ_STAT_EXPIRED))
atomic_inc(&s->vdq.available);
/** * qbman_swp_dqrr_consume() - Consume DQRR entries previously returned from * qbman_swp_dqrr_next(). * @s: the software portal object * @dq: the DQRR entry to be consumed
*/ void qbman_swp_dqrr_consume(struct qbman_swp *s, conststruct dpaa2_dq *dq)
{
qbman_write_register(s, QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq));
}
/** * qbman_result_has_new_result() - Check and get the dequeue response from the * dq storage memory set in pull dequeue command * @s: the software portal object * @dq: the dequeue result read from the memory * * Return 1 for getting a valid dequeue result, or 0 for not getting a valid * dequeue result. * * Only used for user-provided storage of dequeue results, not DQRR. For * efficiency purposes, the driver will perform any required endianness * conversion to ensure that the user's dequeue result storage is in host-endian * format. As such, once the user has called qbman_result_has_new_result() and * been returned a valid dequeue result, they should not call it again on * the same memory location (except of course if another dequeue command has * been executed to produce a new result to that location).
*/ int qbman_result_has_new_result(struct qbman_swp *s, conststruct dpaa2_dq *dq)
{ if (dq->dq.tok != QMAN_DQ_TOKEN_VALID) return 0;
/* * Set token to be 0 so we will detect change back to 1 * next time the looping is traversed. Const is cast away here * as we want users to treat the dequeue responses as read only.
*/
((struct dpaa2_dq *)dq)->dq.tok = 0;
/* * Determine whether VDQCR is available based on whether the * current result is sitting in the first storage location of * the busy command.
*/ if (s->vdq.storage == dq) {
s->vdq.storage = NULL;
atomic_inc(&s->vdq.available);
}
return 1;
}
/**
 * qbman_release_desc_clear() - Clear the contents of a descriptor to
 *	default/starting state.
 * @d: the pull dequeue descriptor to be cleared
 */
void qbman_release_desc_clear(struct qbman_release_desc *d)
{
	memset(d, 0, sizeof(*d));
	d->verb = 1 << 5; /* Release Command Valid */
}
/**
 * qbman_release_desc_set_bpid() - Set the ID of the buffer pool to release to
 * @d: the pull dequeue descriptor to be set
 * @bpid: the bpid value to be set
 */
void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid)
{
	d->bpid = cpu_to_le16(bpid);
}
/** * qbman_release_desc_set_rcdi() - Determines whether or not the portal's RCDI * interrupt source should be asserted after the release command is completed. * @d: the pull dequeue descriptor to be set * @enable: enable (1) or disable (0) value
*/ void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
{ if (enable)
d->verb |= 1 << 6; else
d->verb &= ~(1 << 6);
}
/** * qbman_swp_release_direct() - Issue a buffer release command * @s: the software portal object * @d: the release descriptor * @buffers: a pointer pointing to the buffer address to be released * @num_buffers: number of buffers to be released, must be less than 8 * * Return 0 for success, -EBUSY if the release command ring is not ready.
*/ int qbman_swp_release_direct(struct qbman_swp *s, conststruct qbman_release_desc *d, const u64 *buffers, unsignedint num_buffers)
{ int i; struct qbman_release_desc *p;
u32 rar;
if (!num_buffers || (num_buffers > 7)) return -EINVAL;
rar = qbman_read_register(s, QBMAN_CINH_SWP_RAR); if (!RAR_SUCCESS(rar)) return -EBUSY;
/* Start the release command */
p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
/* Copy the caller's buffer pointers to the command */ for (i = 0; i < num_buffers; i++)
p->buf[i] = cpu_to_le64(buffers[i]);
p->bpid = d->bpid;
/* * Set the verb byte, have to substitute in the valid-bit * and the number of buffers.
*/
dma_wmb();
p->verb = d->verb | RAR_VB(rar) | num_buffers;
return 0;
}
/** * qbman_swp_release_mem_back() - Issue a buffer release command * @s: the software portal object * @d: the release descriptor * @buffers: a pointer pointing to the buffer address to be released * @num_buffers: number of buffers to be released, must be less than 8 * * Return 0 for success, -EBUSY if the release command ring is not ready.
*/ int qbman_swp_release_mem_back(struct qbman_swp *s, conststruct qbman_release_desc *d, const u64 *buffers, unsignedint num_buffers)
{ int i; struct qbman_release_desc *p;
u32 rar;
if (!num_buffers || (num_buffers > 7)) return -EINVAL;
rar = qbman_read_register(s, QBMAN_CINH_SWP_RAR); if (!RAR_SUCCESS(rar)) return -EBUSY;
/* Start the release command */
p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR_MEM(RAR_IDX(rar)));
/* Copy the caller's buffer pointers to the command */ for (i = 0; i < num_buffers; i++)
p->buf[i] = cpu_to_le64(buffers[i]);
p->bpid = d->bpid;
/**
 * qbman_swp_acquire() - Issue a buffer acquire command
 * @s: the software portal object
 * @bpid: the buffer pool index
 * @buffers: a pointer pointing to the acquired buffer addresses
 * @num_buffers: number of buffers to be acquired, must be less than 8
 *
 * Return 0 for success, or negative error code if the acquire command
 * fails.
 *
 * NOTE(review): the body below appears truncated in this copy — it stops
 * right after starting the management command; the command encode,
 * completion wait and result decode are missing (and the signature carries
 * a fused 'unsignedint' token). Restore from the upstream driver before
 * use — TODO confirm against mainline.
 */
int qbman_swp_acquire(struct qbman_swp *s, u16 bpid, u64 *buffers, unsignedint num_buffers)
{
	struct qbman_acquire_desc *p;
	struct qbman_acquire_rslt *r;
	int i;

	if (!num_buffers || (num_buffers > 7))
		return -EINVAL;

	/* Start the management command */
	p = qbman_swp_mc_start(s);
/**
 * qbman_swp_get_irq_coalescing() - Get the current IRQ coalescing parameters
 * @p: the software portal object
 * @irq_threshold: interrupt threshold (an IRQ is generated when there are more
 *	DQRR entries in the portal than the threshold)
 * @irq_holdoff: interrupt holdoff (timeout) period in us
 *
 * Either output pointer may be NULL if the caller does not need that value.
 */
void qbman_swp_get_irq_coalescing(struct qbman_swp *p, u32 *irq_threshold,
				  u32 *irq_holdoff)
{
	if (irq_threshold)
		*irq_threshold = p->irq_threshold;
	if (irq_holdoff)
		*irq_holdoff = p->irq_holdoff;
}
/*
 * NOTE(review): the following German website boilerplate was appended to the
 * file by the text extraction and is not part of the driver source; it is
 * kept here as a comment only so the file stays compilable, and is safe to
 * delete. Translation: "The information on this website was carefully
 * compiled to the best of our knowledge. However, neither completeness,
 * correctness, nor quality of the information provided is guaranteed.
 * Note: the syntax colouring and the measurement are still experimental."
 */