/* * * This should be called with the QP r_lock held. * * The s_lock will be acquired around the hfi1_migrate_qp() call.
*/ int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct hfi1_packet *packet)
{
__be64 guid; unsignedlong flags; struct rvt_qp *qp = packet->qp;
u8 sc5 = ibp->sl_to_sc[rdma_ah_get_sl(&qp->remote_ah_attr)];
u32 dlid = packet->dlid;
u32 slid = packet->slid;
u32 sl = packet->sl; bool migrated = packet->migrated;
u16 pkey = packet->pkey;
if (qp->s_mig_state == IB_MIG_ARMED && migrated) { if (!packet->grh) { if ((rdma_ah_get_ah_flags(&qp->alt_ah_attr) &
IB_AH_GRH) &&
(packet->etype != RHF_RCV_TYPE_BYPASS)) return 1;
} else { conststruct ib_global_route *grh;
if (!(rdma_ah_get_ah_flags(&qp->alt_ah_attr) &
IB_AH_GRH)) return 1;
grh = rdma_ah_read_grh(&qp->alt_ah_attr);
guid = get_sguid(ibp, grh->sgid_index); if (!gid_ok(&packet->grh->dgid, ibp->rvp.gid_prefix,
guid)) return 1; if (!gid_ok(
&packet->grh->sgid,
grh->dgid.global.subnet_prefix,
grh->dgid.global.interface_id)) return 1;
} if (unlikely(rcv_pkey_check(ppd_from_ibp(ibp), pkey,
sc5, slid))) {
hfi1_bad_pkey(ibp, pkey, sl, 0, qp->ibqp.qp_num,
slid, dlid); return 1;
} /* Validate the SLID. See Ch. 9.6.1.5 and 17.2.8 */ if (slid != rdma_ah_get_dlid(&qp->alt_ah_attr) ||
ppd_from_ibp(ibp)->port !=
rdma_ah_get_port_num(&qp->alt_ah_attr)) return 1;
spin_lock_irqsave(&qp->s_lock, flags);
hfi1_migrate_qp(qp);
spin_unlock_irqrestore(&qp->s_lock, flags);
} else { if (!packet->grh) { if ((rdma_ah_get_ah_flags(&qp->remote_ah_attr) &
IB_AH_GRH) &&
(packet->etype != RHF_RCV_TYPE_BYPASS)) return 1;
} else { conststruct ib_global_route *grh;
if (!(rdma_ah_get_ah_flags(&qp->remote_ah_attr) &
IB_AH_GRH)) return 1;
grh = rdma_ah_read_grh(&qp->remote_ah_attr);
guid = get_sguid(ibp, grh->sgid_index); if (!gid_ok(&packet->grh->dgid, ibp->rvp.gid_prefix,
guid)) return 1; if (!gid_ok(
&packet->grh->sgid,
grh->dgid.global.subnet_prefix,
grh->dgid.global.interface_id)) return 1;
} if (unlikely(rcv_pkey_check(ppd_from_ibp(ibp), pkey,
sc5, slid))) {
hfi1_bad_pkey(ibp, pkey, sl, 0, qp->ibqp.qp_num,
slid, dlid); return 1;
} /* Validate the SLID. See Ch. 9.6.1.5 */ if ((slid != rdma_ah_get_dlid(&qp->remote_ah_attr)) ||
ppd_from_ibp(ibp)->port != qp->port_num) return 1; if (qp->s_mig_state == IB_MIG_REARM && !migrated)
qp->s_mig_state = IB_MIG_ARMED;
}
return 0;
}
/**
 * hfi1_make_grh - construct a GRH header
 * @ibp: a pointer to the IB port
 * @hdr: a pointer to the GRH header being constructed
 * @grh: the global route address to send to
 * @hwords: size of header after grh being sent in dwords
 * @nwords: the number of 32 bit words of data being sent
 *
 * Return the size of the header in 32 bit words.
 */
u32 hfi1_make_grh(struct hfi1_ibport *ibp, struct ib_grh *hdr,
		  const struct ib_global_route *grh, u32 hwords, u32 nwords)
{
	hdr->version_tclass_flow =
		cpu_to_be32((IB_GRH_VERSION << IB_GRH_VERSION_SHIFT) |
			    (grh->traffic_class << IB_GRH_TCLASS_SHIFT) |
			    (grh->flow_label << IB_GRH_FLOW_SHIFT));
	hdr->paylen = cpu_to_be16((hwords + nwords) << 2);
	/* next_hdr is defined by C8-7 in ch. 8.4.1 */
	hdr->next_hdr = IB_GRH_NEXT_HDR;
	hdr->hop_limit = grh->hop_limit;
	/* The SGID is 32-bit aligned. */
	hdr->sgid.global.subnet_prefix = ibp->rvp.gid_prefix;
	hdr->sgid.global.interface_id =
		grh->sgid_index < HFI1_GUIDS_PER_PORT ?
		get_sguid(ibp, grh->sgid_index) :
		get_sguid(ibp, HFI1_PORT_GUID_INDEX);
	hdr->dgid = grh->dgid;

	/*
	 * NOTE(review): the return statement and closing brace were missing
	 * from this chunk (apparent extraction corruption); restored to match
	 * the documented contract above — confirm against upstream.
	 */
	/* GRH header size in 32-bit words. */
	return sizeof(struct ib_grh) / sizeof(u32) + hwords;
}
/**
 * build_ahg - create ahg in s_ahg
 * @qp: a pointer to QP
 * @npsn: the next PSN for the request/response
 *
 * This routine handles the AHG by allocating an ahg entry and causing the
 * copy of the first middle.
 *
 * Subsequent middles use the copied entry, editing the
 * PSN with 1 or 2 edits.
 */
static inline void build_ahg(struct rvt_qp *qp, u32 npsn)
{
	struct hfi1_qp_priv *priv = qp->priv;

	if (unlikely(qp->s_flags & HFI1_S_AHG_CLEAR))
		clear_ahg(qp);
	if (!(qp->s_flags & HFI1_S_AHG_VALID)) {
		/* first middle that needs copy */
		if (qp->s_ahgidx < 0)
			qp->s_ahgidx = sdma_ahg_alloc(priv->s_sde);
		if (qp->s_ahgidx >= 0) {
			qp->s_ahgpsn = npsn;
			priv->s_ahg->tx_flags |= SDMA_TXREQ_F_AHG_COPY;
			/* save to protect a change in another thread */
			priv->s_ahg->ahgidx = qp->s_ahgidx;
			qp->s_flags |= HFI1_S_AHG_VALID;
		}
	} else {
		/* subsequent middle after valid */
		if (qp->s_ahgidx >= 0) {
			priv->s_ahg->tx_flags |= SDMA_TXREQ_F_USE_AHG;
			priv->s_ahg->ahgidx = qp->s_ahgidx;
			priv->s_ahg->ahgcount++;
			/* First edit: patch the low 16 bits of the PSN in BTH2. */
			priv->s_ahg->ahgdesc[0] =
				sdma_build_ahg_descriptor(
					(__force u16)cpu_to_be16((u16)npsn),
					BTH2_OFFSET,
					16,
					16);
			if ((npsn & 0xffff0000) !=
			    (qp->s_ahgpsn & 0xffff0000)) {
				/* High 16 bits rolled over: need a second edit. */
				priv->s_ahg->ahgcount++;
				priv->s_ahg->ahgdesc[1] =
					sdma_build_ahg_descriptor(
						(__force u16)cpu_to_be16(
							(u16)(npsn >> 16)),
						BTH2_OFFSET,
						0,
						16);
			}
		}
	}
}
/*
 * NOTE(review): the lines below are a displaced fragment — they reference
 * locals (becn, middle, bth2) that are not declared in this chunk, and the
 * opening of the function they belong to is missing (apparent extraction
 * corruption; presumably the tail of a 9B/16B RUC header builder). Do not
 * edit in place; recover the full enclosing function before changing.
 */
if (qp->s_flags & RVT_S_ECN) {
qp->s_flags &= ~RVT_S_ECN; /* we recently received a FECN, so return a BECN */
becn = true;
middle = 0;
} if (middle)
build_ahg(qp, bth2); else
qp->s_flags &= ~HFI1_S_AHG_VALID;
/* We support only two types - 9B and 16B for now */ staticconst hfi1_make_ruc_hdr hfi1_ruc_header_tbl[2] = {
[HFI1_PKT_TYPE_9B] = &hfi1_make_ruc_header_9B,
[HFI1_PKT_TYPE_16B] = &hfi1_make_ruc_header_16B
};
/* * reset s_ahg/AHG fields * * This insures that the ahgentry/ahgcount * are at a non-AHG default to protect * build_verbs_tx_desc() from using * an include ahgidx. * * build_ahg() will modify as appropriate * to use the AHG feature.
*/
priv->s_ahg->tx_flags = 0;
priv->s_ahg->ahgcount = 0;
priv->s_ahg->ahgidx = 0;
/* Make the appropriate header */
hfi1_ruc_header_tbl[priv->hdr_type](qp, ohdr, bth0, bth1, bth2, middle,
ps);
}
/* when sending, force a reschedule every one of these periods */ #define SEND_RESCHED_TIMEOUT (5 * HZ) /* 5s in jiffies */
/** * hfi1_schedule_send_yield - test for a yield required for QP * send engine * @qp: a pointer to QP * @ps: a pointer to a structure with commonly lookup values for * the send engine progress * @tid: true if it is the tid leg * * This routine checks if the time slice for the QP has expired * for RC QPs, if so an additional work entry is queued. At this * point, other QPs have an opportunity to be scheduled. It * returns true if a yield is required, otherwise, false * is returned.
 */ bool hfi1_schedule_send_yield(struct rvt_qp *qp, struct hfi1_pkt_state *ps, bool tid)
{
ps->pkts_sent = true;
if (unlikely(time_after(jiffies, ps->timeout))) { if (!ps->in_thread ||
workqueue_congested(ps->cpu, ps->ppd->hfi1_wq)) {
spin_lock_irqsave(&qp->s_lock, ps->flags); if (!tid) {
qp->s_flags &= ~RVT_S_BUSY;
hfi1_schedule_send(qp);
} else { struct hfi1_qp_priv *priv = qp->priv;
/*
 * NOTE(review): this function is TRUNCATED here — the rest of the
 * else-branch, the unlock, and the return paths are missing from this
 * chunk (apparent extraction corruption). Do not edit until the full
 * body is recovered; compare against the upstream driver source.
 */
/** * hfi1_do_send - perform a send on a QP * @qp: a pointer to the QP * @in_thread: true if in a workqueue thread * * Process entries in the send work queue until credit or queue is * exhausted. Only allow one CPU to send a packet per QP. * Otherwise, two threads could send packets out of order.
 */ void hfi1_do_send(struct rvt_qp *qp, bool in_thread)
{ struct hfi1_pkt_state ps; struct hfi1_qp_priv *priv = qp->priv; int (*make_req)(struct rvt_qp *qp, struct hfi1_pkt_state *ps);
/*
 * NOTE(review): large interior sections of this function are MISSING from
 * this chunk (no s_lock acquisition before the unlock below, no ps field
 * initialization, no make_req selection) — apparent extraction corruption.
 * Do not edit until the full body is recovered from upstream.
 */
/* Return if we are already busy processing a work request. */ if (!hfi1_send_ok(qp)) { if (qp->s_flags & HFI1_S_ANY_WAIT_IO)
iowait_set_flag(&priv->s_iowait, IOWAIT_PENDING_IB);
spin_unlock_irqrestore(&qp->s_lock, ps.flags); return;
}
/* insure a pre-built packet is handled */
ps.s_txreq = get_waiting_verbs_txreq(ps.wait); do { /* Check for a constructed packet to be sent. */ if (ps.s_txreq) { if (priv->s_flags & HFI1_S_TID_BUSY_SET)
qp->s_flags |= RVT_S_BUSY;
spin_unlock_irqrestore(&qp->s_lock, ps.flags); /* * If the packet cannot be sent now, return and * the send engine will be woken up later.
 */ if (hfi1_verbs_send(qp, &ps)) return;
/* allow other tasks to run */ if (hfi1_schedule_send_yield(qp, &ps, false)) return;
/* NOTE(review): function TRUNCATED here — remainder of the do/while loop and closing brace are missing. */
/*
 * NOTE(review): the following is web-page boilerplate (a German disclaimer)
 * accidentally appended to this file during extraction — not driver source;
 * remove. English translation: "The information on this website has been
 * carefully compiled to the best of our knowledge. However, neither
 * completeness, nor correctness, nor quality of the provided information is
 * guaranteed. Note: the colored syntax rendering and the measurement are
 * still experimental."
 */