// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2008-2009 Cisco Systems, Inc. All rights reserved. * Copyright (c) 2009 Intel Corporation. All rights reserved. * * Maintained at www.Open-FCoE.org
*/
if (state < ARRAY_SIZE(fcoe_ctlr_states))
cp = fcoe_ctlr_states[state]; if (!cp)
cp = "unknown"; return cp;
}
/**
 * fcoe_ctlr_set_state() - Set and do debug printing for the new FIP state.
 * @fip: The FCoE controller
 * @state: The new state
 *
 * No-op when the controller is already in @state.  The transition is
 * logged only when a local port is attached.
 */
static void fcoe_ctlr_set_state(struct fcoe_ctlr *fip, enum fip_state state)
{
	if (fip->state == state)
		return;

	if (fip->lp)
		LIBFCOE_FIP_DBG(fip, "state %s -> %s\n",
				fcoe_ctlr_state(fip->state),
				fcoe_ctlr_state(state));

	fip->state = state;
}
/** * fcoe_ctlr_mtu_valid() - Check if a FCF's MTU is valid * @fcf: The FCF to check * * Return non-zero if FCF fcoe_size has been validated.
*/ staticinlineint fcoe_ctlr_mtu_valid(conststruct fcoe_fcf *fcf)
{ return (fcf->flags & FIP_FL_SOL) != 0;
}
/**
 * fcoe_ctlr_fcf_usable() - Check if a FCF is usable
 * @fcf: The FCF to check
 *
 * Return non-zero if the FCF is usable: both the solicited (validated)
 * and available flags must be set.
 */
static inline int fcoe_ctlr_fcf_usable(struct fcoe_fcf *fcf)
{
	const u16 required = FIP_FL_SOL | FIP_FL_AVAIL;

	return (fcf->flags & required) == required;
}
/** * fcoe_ctlr_map_dest() - Set flag and OUI for mapping destination addresses * @fip: The FCoE controller
*/ staticvoid fcoe_ctlr_map_dest(struct fcoe_ctlr *fip)
{ if (fip->mode == FIP_MODE_VN2VN)
hton24(fip->dest_addr, FIP_VN_FC_MAP); else
hton24(fip->dest_addr, FIP_DEF_FC_MAP);
hton24(fip->dest_addr + 3, 0);
fip->map_dest = 1;
}
/*
 * If ctlr_dev doesn't exist then it means we're a libfcoe user
 * who doesn't use fcoe_sysfs and didn't allocate a fcoe_ctlr_device.
 * fnic would be an example of a driver with this behavior. In this
 * case we want to add the fcoe_fcf to the fcoe_ctlr list, but we
 * don't want to make sysfs changes.
*/
/*
 * The fcoe_sysfs layer can return a CONNECTED fcf that
 * has a priv (fcf was never deleted) or a CONNECTED fcf
 * that doesn't have a priv (fcf was deleted). However,
 * libfcoe will always delete FCFs before trying to add
 * them. This is ensured because both recv_adv and
 * age_fcfs are protected by the fcoe_ctlr's mutex.
 * This means that we should never get a FCF with a
 * non-NULL priv pointer.
*/
BUG_ON(fcf_dev->priv);
/**
 * fcoe_sysfs_fcf_del() - Remove a fcoe_fcf{,_device} from a fcoe_ctlr{,_device}
 * @new: The FCF to be removed
 *
 * Called with fip->ctlr_mutex held
 */
static void fcoe_sysfs_fcf_del(struct fcoe_fcf *new)
{
	struct fcoe_ctlr *fip = new->fip;
	struct fcoe_ctlr_device *ctlr_dev;
	struct fcoe_fcf_device *fcf_dev;

	/* Unlink from the controller's FCF list first. */
	list_del(&new->list);
	fip->fcf_count--;

	/*
	 * If ctlr_dev doesn't exist then it means we're a libfcoe user
	 * who doesn't use fcoe_sysfs and didn't allocate a fcoe_ctlr_device
	 * or a fcoe_fcf_device.
	 *
	 * fnic would be an example of a driver with this behavior. In this
	 * case we want to remove the fcoe_fcf from the fcoe_ctlr list (above),
	 * but we don't want to make sysfs changes.
	 */
	ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip);
	if (ctlr_dev) {
		mutex_lock(&ctlr_dev->lock);
		fcf_dev = fcoe_fcf_to_fcf_dev(new);
		WARN_ON(!fcf_dev);
		new->fcf_dev = NULL;
		fcoe_fcf_device_delete(fcf_dev);
		mutex_unlock(&ctlr_dev->lock);
	}
	kfree(new);
}
/** * fcoe_ctlr_reset_fcfs() - Reset and free all FCFs for a controller * @fip: The FCoE controller whose FCFs are to be reset * * Called with &fcoe_ctlr lock held.
*/ staticvoid fcoe_ctlr_reset_fcfs(struct fcoe_ctlr *fip)
{ struct fcoe_fcf *fcf; struct fcoe_fcf *next;
/** * fcoe_ctlr_destroy() - Disable and tear down a FCoE controller * @fip: The FCoE controller to tear down * * This is called by FCoE drivers before freeing the &fcoe_ctlr. * * The receive handler will have been deleted before this to guarantee * that no more recv_work will be scheduled. * * The timer routine will simply return once we set FIP_ST_DISABLED. * This guarantees that no further timeouts or work will be scheduled.
*/ void fcoe_ctlr_destroy(struct fcoe_ctlr *fip)
{
cancel_work_sync(&fip->recv_work);
skb_queue_purge(&fip->fip_recv_list);
/** * fcoe_ctlr_announce() - announce new FCF selection * @fip: The FCoE controller * * Also sets the destination MAC for FCoE and control packets * * Called with neither ctlr_mutex nor ctlr_lock held.
*/ staticvoid fcoe_ctlr_announce(struct fcoe_ctlr *fip)
{ struct fcoe_fcf *sel; struct fcoe_fcf *fcf;
spin_unlock_bh(&fip->ctlr_lock);
sel = fip->sel_fcf;
if (sel && ether_addr_equal(sel->fcf_mac, fip->dest_addr)) goto unlock; if (!is_zero_ether_addr(fip->dest_addr)) {
printk(KERN_NOTICE "libfcoe: host%d: " "FIP Fibre-Channel Forwarder MAC %pM deselected\n",
fip->lp->host->host_no, fip->dest_addr);
eth_zero_addr(fip->dest_addr);
} if (sel) {
printk(KERN_INFO "libfcoe: host%d: FIP selected " "Fibre-Channel Forwarder MAC %pM\n",
fip->lp->host->host_no, sel->fcf_mac);
memcpy(fip->dest_addr, sel->fcoe_mac, ETH_ALEN);
fip->map_dest = 0;
}
unlock:
mutex_unlock(&fip->ctlr_mutex);
}
/** * fcoe_ctlr_fcoe_size() - Return the maximum FCoE size required for VN_Port * @fip: The FCoE controller to get the maximum FCoE size from * * Returns the maximum packet size including the FCoE header and trailer, * but not including any Ethernet or VLAN headers.
*/ staticinline u32 fcoe_ctlr_fcoe_size(struct fcoe_ctlr *fip)
{ /* * Determine the max FCoE frame size allowed, including * FCoE header and trailer. * Note: lp->mfs is currently the payload size, not the frame size.
*/ return fip->lp->mfs + sizeof(struct fc_frame_header) + sizeof(struct fcoe_hdr) + sizeof(struct fcoe_crc_eof);
}
/** * fcoe_ctlr_solicit() - Send a FIP solicitation * @fip: The FCoE controller to send the solicitation on * @fcf: The destination FCF (if NULL, a multicast solicitation is sent)
*/ staticvoid fcoe_ctlr_solicit(struct fcoe_ctlr *fip, struct fcoe_fcf *fcf)
{ struct sk_buff *skb; struct fip_sol { struct ethhdr eth; struct fip_header fip; struct { struct fip_mac_desc mac; struct fip_wwn_desc wwnn; struct fip_size_desc size;
} __packed desc;
} __packed * sol;
u32 fcoe_size;
skb = dev_alloc_skb(sizeof(*sol)); if (!skb) return;
/** * fcoe_ctlr_link_down() - Stop a FCoE controller * @fip: The FCoE controller to be stopped * * Returns non-zero if the link was up and now isn't. * * Called from the LLD when the network link is not ready. * There may be multiple calls while the link is down.
*/ int fcoe_ctlr_link_down(struct fcoe_ctlr *fip)
{ int link_dropped;
if (link_dropped)
fc_linkdown(fip->lp); return link_dropped;
}
EXPORT_SYMBOL(fcoe_ctlr_link_down);
/** * fcoe_ctlr_send_keep_alive() - Send a keep-alive to the selected FCF * @fip: The FCoE controller to send the FKA on * @lport: libfc fc_lport to send from * @ports: 0 for controller keep-alive, 1 for port keep-alive * @sa: The source MAC address * * A controller keep-alive is sent every fka_period (typically 8 seconds). * The source MAC is the native MAC address. * * A port keep-alive is sent every 90 seconds while logged in. * The source MAC is the assigned mapped source address. * The destination is the FCF's F-port.
*/ staticvoid fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip, struct fc_lport *lport, int ports, u8 *sa)
{ struct sk_buff *skb; struct fip_kal { struct ethhdr eth; struct fip_header fip; struct fip_mac_desc mac;
} __packed * kal; struct fip_vn_desc *vn;
u32 len; struct fc_lport *lp; struct fcoe_fcf *fcf;
/** * fcoe_ctlr_encaps() - Encapsulate an ELS frame for FIP, without sending it * @fip: The FCoE controller for the ELS frame * @lport: The local port * @dtype: The FIP descriptor type for the frame * @skb: The FCoE ELS frame including FC header but no FCoE headers * @d_id: The destination port ID. * * Returns non-zero error code on failure. * * The caller must check that the length is a multiple of 4. * * The @skb must have enough headroom (28 bytes) and tailroom (8 bytes). * Headroom includes the FIP encapsulation description, FIP header, and * Ethernet header. The tailroom is for the FIP MAC descriptor.
*/ staticint fcoe_ctlr_encaps(struct fcoe_ctlr *fip, struct fc_lport *lport,
u8 dtype, struct sk_buff *skb, u32 d_id)
{ struct fip_encaps_head { struct ethhdr eth; struct fip_header fip; struct fip_encaps encaps;
} __packed * cap; struct fc_frame_header *fh; struct fip_mac_desc *mac; struct fcoe_fcf *fcf;
size_t dlen;
u16 fip_flags;
u8 op;
fh = (struct fc_frame_header *)skb->data;
op = *(u8 *)(fh + 1);
dlen = sizeof(struct fip_encaps) + skb->len; /* len before push */
cap = skb_push(skb, sizeof(*cap));
memset(cap, 0, sizeof(*cap));
/** * fcoe_ctlr_els_send() - Send an ELS frame encapsulated by FIP if appropriate. * @fip: FCoE controller. * @lport: libfc fc_lport to send from * @skb: FCoE ELS frame including FC header but no FCoE headers. * * Returns a non-zero error code if the frame should not be sent. * Returns zero if the caller should send the frame with FCoE encapsulation. * * The caller must check that the length is a multiple of 4. * The SKB must have enough headroom (28 bytes) and tailroom (8 bytes). * The the skb must also be an fc_frame. * * This is called from the lower-level driver with spinlocks held, * so we must not take a mutex here.
*/ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport, struct sk_buff *skb)
{ struct fc_frame *fp; struct fc_frame_header *fh;
u16 old_xid;
u8 op;
u8 mac[ETH_ALEN];
if (op == ELS_FLOGI && fip->mode != FIP_MODE_VN2VN) {
old_xid = fip->flogi_oxid;
fip->flogi_oxid = ntohs(fh->fh_ox_id); if (fip->state == FIP_ST_AUTO) { if (old_xid == FC_XID_UNKNOWN)
fip->flogi_count = 0;
fip->flogi_count++; if (fip->flogi_count < 3) goto drop;
fcoe_ctlr_map_dest(fip); return 0;
} if (fip->state == FIP_ST_NON_FIP)
fcoe_ctlr_map_dest(fip);
}
if (fip->state == FIP_ST_NON_FIP) return 0; if (!fip->sel_fcf && fip->mode != FIP_MODE_VN2VN) goto drop; switch (op) { case ELS_FLOGI:
op = FIP_DT_FLOGI; if (fip->mode == FIP_MODE_VN2VN) break;
spin_lock_bh(&fip->ctlr_lock);
kfree_skb(fip->flogi_req);
fip->flogi_req = skb;
fip->flogi_req_send = 1;
spin_unlock_bh(&fip->ctlr_lock);
schedule_work(&fip->timer_work); return -EINPROGRESS; case ELS_FDISC: if (ntoh24(fh->fh_s_id)) return 0;
op = FIP_DT_FDISC; break; case ELS_LOGO: if (fip->mode == FIP_MODE_VN2VN) { if (fip->state != FIP_ST_VNMP_UP) goto drop; if (ntoh24(fh->fh_d_id) == FC_FID_FLOGI) goto drop;
} else { if (fip->state != FIP_ST_ENABLED) return 0; if (ntoh24(fh->fh_d_id) != FC_FID_FLOGI) return 0;
}
op = FIP_DT_LOGO; break; case ELS_LS_ACC: /* * If non-FIP, we may have gotten an SID by accepting an FLOGI * from a point-to-point connection. Switch to using * the source mac based on the SID. The destination * MAC in this case would have been set by receiving the * FLOGI.
*/ if (fip->state == FIP_ST_NON_FIP) { if (fip->flogi_oxid == FC_XID_UNKNOWN) return 0;
fip->flogi_oxid = FC_XID_UNKNOWN;
fc_fcoe_set_mac(mac, fh->fh_d_id);
fip->update_mac(lport, mac);
}
fallthrough; case ELS_LS_RJT:
op = fr_encaps(fp); if (op) break; return 0; default: if (fip->state != FIP_ST_ENABLED &&
fip->state != FIP_ST_VNMP_UP) goto drop; return 0;
}
LIBFCOE_FIP_DBG(fip, "els_send op %u d_id %x\n",
op, ntoh24(fh->fh_d_id)); if (fcoe_ctlr_encaps(fip, lport, op, skb, ntoh24(fh->fh_d_id))) goto drop;
fip->send(fip, skb); return -EINPROGRESS;
drop:
LIBFCOE_FIP_DBG(fip, "drop els_send op %u d_id %x\n",
op, ntoh24(fh->fh_d_id));
kfree_skb(skb); return -EINVAL;
}
EXPORT_SYMBOL(fcoe_ctlr_els_send);
/** * fcoe_ctlr_age_fcfs() - Reset and free all old FCFs for a controller * @fip: The FCoE controller to free FCFs on * * Called with lock held and preemption disabled. * * An FCF is considered old if we have missed two advertisements. * That is, there have been no valid advertisement from it for 2.5 * times its keep-alive period. * * In addition, determine the time when an FCF selection can occur. * * Also, increment the MissDiscAdvCount when no advertisement is received * for the corresponding FCF for 1.5 * FKA_ADV_PERIOD (FC-BB-5 LESB). * * Returns the time in jiffies for the next call.
*/ staticunsignedlong fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
{ struct fcoe_fcf *fcf; struct fcoe_fcf *next; unsignedlong next_timer = jiffies + msecs_to_jiffies(FIP_VN_KA_PERIOD); unsignedlong deadline; unsignedlong sel_time = 0; struct list_head del_list;
deadline += fcf->fka_period; if (time_after_eq(jiffies, deadline)) { if (fip->sel_fcf == fcf)
fip->sel_fcf = NULL; /* * Move to delete list so we can call * fcoe_sysfs_fcf_del (which can sleep) * after the put_cpu().
*/
list_del(&fcf->list);
list_add(&fcf->list, &del_list);
this_cpu_inc(fip->lp->stats->VLinkFailureCount);
} else { if (time_after(next_timer, deadline))
next_timer = deadline; if (fcoe_ctlr_mtu_valid(fcf) &&
(!sel_time || time_before(sel_time, fcf->time)))
sel_time = fcf->time;
}
}
list_for_each_entry_safe(fcf, next, &del_list, list) { /* Removes fcf from current list */
fcoe_sysfs_fcf_del(fcf);
}
desc = (struct fip_desc *)(fiph + 1); while (rlen > 0) {
dlen = desc->fip_dlen * FIP_BPW; if (dlen < sizeof(*desc) || dlen > rlen) return -EINVAL; /* Drop Adv if there are duplicate critical descriptors */ if ((desc->fip_dtype < 32) &&
!(desc_mask & 1U << desc->fip_dtype)) {
LIBFCOE_FIP_DBG(fip, "Duplicate Critical " "Descriptors in FIP adv\n"); return -EINVAL;
} switch (desc->fip_dtype) { case FIP_DT_PRI: if (dlen != sizeof(struct fip_pri_desc)) goto len_err;
fcf->pri = ((struct fip_pri_desc *)desc)->fd_pri;
desc_mask &= ~BIT(FIP_DT_PRI); break; case FIP_DT_MAC: if (dlen != sizeof(struct fip_mac_desc)) goto len_err;
memcpy(fcf->fcf_mac,
((struct fip_mac_desc *)desc)->fd_mac,
ETH_ALEN);
memcpy(fcf->fcoe_mac, fcf->fcf_mac, ETH_ALEN); if (!is_valid_ether_addr(fcf->fcf_mac)) {
LIBFCOE_FIP_DBG(fip, "Invalid MAC addr %pM in FIP adv\n",
fcf->fcf_mac); return -EINVAL;
}
desc_mask &= ~BIT(FIP_DT_MAC); break; case FIP_DT_NAME: if (dlen != sizeof(struct fip_wwn_desc)) goto len_err;
wwn = (struct fip_wwn_desc *)desc;
fcf->switch_name = get_unaligned_be64(&wwn->fd_wwn);
desc_mask &= ~BIT(FIP_DT_NAME); break; case FIP_DT_FAB: if (dlen != sizeof(struct fip_fab_desc)) goto len_err;
fab = (struct fip_fab_desc *)desc;
fcf->fabric_name = get_unaligned_be64(&fab->fd_wwn);
fcf->vfid = ntohs(fab->fd_vfid);
fcf->fc_map = ntoh24(fab->fd_map);
desc_mask &= ~BIT(FIP_DT_FAB); break; case FIP_DT_FKA: if (dlen != sizeof(struct fip_fka_desc)) goto len_err;
fka = (struct fip_fka_desc *)desc; if (fka->fd_flags & FIP_FKA_ADV_D)
fcf->fd_flags = 1;
t = ntohl(fka->fd_fka_period); if (t >= FCOE_CTLR_MIN_FKA)
fcf->fka_period = msecs_to_jiffies(t);
desc_mask &= ~BIT(FIP_DT_FKA); break; case FIP_DT_MAP_OUI: case FIP_DT_FCOE_SIZE: case FIP_DT_FLOGI: case FIP_DT_FDISC: case FIP_DT_LOGO: case FIP_DT_ELP: default:
LIBFCOE_FIP_DBG(fip, "unexpected descriptor type %x " "in FIP adv\n", desc->fip_dtype); /* standard says ignore unknown descriptors >= 128 */ if (desc->fip_dtype < FIP_DT_NON_CRITICAL) return -EINVAL; break;
}
desc = (struct fip_desc *)((char *)desc + dlen);
rlen -= dlen;
} if (!fcf->fc_map || (fcf->fc_map & 0x10000)) return -EINVAL; if (!fcf->switch_name) return -EINVAL; if (desc_mask) {
LIBFCOE_FIP_DBG(fip, "adv missing descriptors mask %x\n",
desc_mask); return -EINVAL;
} return 0;
len_err:
LIBFCOE_FIP_DBG(fip, "FIP length error in descriptor type %x len %zu\n",
desc->fip_dtype, dlen); return -EINVAL;
}
/** * fcoe_ctlr_recv_adv() - Handle an incoming advertisement * @fip: The FCoE controller receiving the advertisement * @skb: The received FIP packet
*/ staticvoid fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
{ struct fcoe_fcf *fcf; struct fcoe_fcf new; unsignedlong sol_tov = msecs_to_jiffies(FCOE_CTLR_SOL_TOV); int first = 0; int mtu_valid; int found = 0; int rc = 0;
if (fcoe_ctlr_parse_adv(fip, skb, &new)) return;
mutex_lock(&fip->ctlr_mutex);
first = list_empty(&fip->fcfs);
list_for_each_entry(fcf, &fip->fcfs, list) { if (fcf->switch_name == new.switch_name &&
fcf->fabric_name == new.fabric_name &&
fcf->fc_map == new.fc_map &&
ether_addr_equal(fcf->fcf_mac, new.fcf_mac)) {
found = 1; break;
}
} if (!found) { if (fip->fcf_count >= FCOE_CTLR_FCF_LIMIT) goto out;
fcf = kmalloc(sizeof(*fcf), GFP_ATOMIC); if (!fcf) goto out;
memcpy(fcf, &new, sizeof(new));
fcf->fip = fip;
rc = fcoe_sysfs_fcf_add(fcf); if (rc) {
printk(KERN_ERR "Failed to allocate sysfs instance " "for FCF, fab %16.16llx mac %pM\n", new.fabric_name, new.fcf_mac);
kfree(fcf); goto out;
}
} else { /* * Update the FCF's keep-alive descriptor flags. * Other flag changes from new advertisements are * ignored after a solicited advertisement is * received and the FCF is selectable (usable).
*/
fcf->fd_flags = new.fd_flags; if (!fcoe_ctlr_fcf_usable(fcf))
fcf->flags = new.flags;
mtu_valid = fcoe_ctlr_mtu_valid(fcf);
fcf->time = jiffies; if (!found)
LIBFCOE_FIP_DBG(fip, "New FCF fab %16.16llx mac %pM\n",
fcf->fabric_name, fcf->fcf_mac);
/* * If this advertisement is not solicited and our max receive size * hasn't been verified, send a solicited advertisement.
*/ if (!mtu_valid)
fcoe_ctlr_solicit(fip, fcf);
/* * If its been a while since we did a solicit, and this is * the first advertisement we've received, do a multicast * solicitation to gather as many advertisements as we can * before selection occurs.
*/ if (first && time_after(jiffies, fip->sol_time + sol_tov))
fcoe_ctlr_solicit(fip, NULL);
/* * Put this FCF at the head of the list for priority among equals. * This helps in the case of an NPV switch which insists we use * the FCF that answers multicast solicitations, not the others that * are sending periodic multicast advertisements.
*/ if (mtu_valid)
list_move(&fcf->list, &fip->fcfs);
/* * If this is the first validated FCF, note the time and * set a timer to trigger selection.
*/ if (mtu_valid && !fip->sel_fcf && !fip->sel_time &&
fcoe_ctlr_fcf_usable(fcf)) {
fip->sel_time = jiffies +
msecs_to_jiffies(FCOE_CTLR_START_DELAY); if (!timer_pending(&fip->timer) ||
time_before(fip->sel_time, fip->timer.expires))
mod_timer(&fip->timer, fip->sel_time);
}
desc = (struct fip_desc *)(fiph + 1); while (rlen > 0) {
desc_cnt++;
dlen = desc->fip_dlen * FIP_BPW; if (dlen < sizeof(*desc) || dlen > rlen) goto drop; /* Drop ELS if there are duplicate critical descriptors */ if (desc->fip_dtype < 32) { if ((desc->fip_dtype != FIP_DT_MAC) &&
(desc_mask & 1U << desc->fip_dtype)) {
LIBFCOE_FIP_DBG(fip, "Duplicate Critical " "Descriptors in FIP ELS\n"); goto drop;
}
desc_mask |= (1 << desc->fip_dtype);
} switch (desc->fip_dtype) { case FIP_DT_MAC:
sel = fip->sel_fcf; if (desc_cnt == 1) {
LIBFCOE_FIP_DBG(fip, "FIP descriptors " "received out of order\n"); goto drop;
} /* * Some switch implementations send two MAC descriptors, * with first MAC(granted_mac) being the FPMA, and the * second one(fcoe_mac) is used as destination address * for sending/receiving FCoE packets. FIP traffic is * sent using fip_mac. For regular switches, both * fip_mac and fcoe_mac would be the same.
*/ if (desc_cnt == 2)
memcpy(granted_mac,
((struct fip_mac_desc *)desc)->fd_mac,
ETH_ALEN);
if (dlen != sizeof(struct fip_mac_desc)) goto len_err;
if ((desc_cnt == 3) && (sel))
memcpy(sel->fcoe_mac,
((struct fip_mac_desc *)desc)->fd_mac,
ETH_ALEN); break; case FIP_DT_FLOGI: case FIP_DT_FDISC: case FIP_DT_LOGO: case FIP_DT_ELP: if (desc_cnt != 1) {
LIBFCOE_FIP_DBG(fip, "FIP descriptors " "received out of order\n"); goto drop;
} if (fh) goto drop; if (dlen < sizeof(*els) + sizeof(*fh) + 1) goto len_err;
els_len = dlen - sizeof(*els);
els = (struct fip_encaps *)desc;
fh = (struct fc_frame_header *)(els + 1);
els_dtype = desc->fip_dtype; break; default:
LIBFCOE_FIP_DBG(fip, "unexpected descriptor type %x " "in FIP adv\n", desc->fip_dtype); /* standard says ignore unknown descriptors >= 128 */ if (desc->fip_dtype < FIP_DT_NON_CRITICAL) goto drop; if (desc_cnt <= 2) {
LIBFCOE_FIP_DBG(fip, "FIP descriptors " "received out of order\n"); goto drop;
} break;
}
desc = (struct fip_desc *)((char *)desc + dlen);
rlen -= dlen;
}
if (!fh) goto drop;
els_op = *(u8 *)(fh + 1);
if ((els_dtype == FIP_DT_FLOGI || els_dtype == FIP_DT_FDISC) &&
sub == FIP_SC_REP && fip->mode != FIP_MODE_VN2VN) { if (els_op == ELS_LS_ACC) { if (!is_valid_ether_addr(granted_mac)) {
LIBFCOE_FIP_DBG(fip, "Invalid MAC address %pM in FIP ELS\n",
granted_mac); goto drop;
}
memcpy(fr_cb(fp)->granted_mac, granted_mac, ETH_ALEN);
if (fip->flogi_oxid == ntohs(fh->fh_ox_id)) {
fip->flogi_oxid = FC_XID_UNKNOWN; if (els_dtype == FIP_DT_FLOGI)
fcoe_ctlr_announce(fip);
}
} elseif (els_dtype == FIP_DT_FLOGI &&
!fcoe_ctlr_flogi_retry(fip)) goto drop; /* retrying FLOGI so drop reject */
}
len_err:
LIBFCOE_FIP_DBG(fip, "FIP length error in descriptor type %x len %zu\n",
desc->fip_dtype, dlen);
drop:
kfree_skb(skb);
}
/** * fcoe_ctlr_recv_clr_vlink() - Handle an incoming link reset frame * @fip: The FCoE controller that received the frame * @skb: The received FIP packet * * There may be multiple VN_Port descriptors. * The overall length has already been checked.
*/ staticvoid fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip, struct sk_buff *skb)
{ struct fip_desc *desc; struct fip_mac_desc *mp; struct fip_wwn_desc *wp; struct fip_vn_desc *vp;
size_t rlen;
size_t dlen; struct fcoe_fcf *fcf = fip->sel_fcf; struct fc_lport *lport = fip->lp; struct fc_lport *vn_port = NULL;
u32 desc_mask; int num_vlink_desc; int reset_phys_port = 0; struct fip_vn_desc **vlink_desc_arr = NULL; struct fip_header *fh = (struct fip_header *)skb->data; struct ethhdr *eh = eth_hdr(skb);
LIBFCOE_FIP_DBG(fip, "Clear Virtual Link received\n");
if (!fcf) { /* * We are yet to select best FCF, but we got CVL in the * meantime. reset the ctlr and let it rediscover the FCF
*/
LIBFCOE_FIP_DBG(fip, "Resetting fcoe_ctlr as FCF has not been " "selected yet\n");
mutex_lock(&fip->ctlr_mutex);
fcoe_ctlr_reset(fip);
mutex_unlock(&fip->ctlr_mutex); return;
}
/* * If we've selected an FCF check that the CVL is from there to avoid * processing CVLs from an unexpected source. If it is from an * unexpected source drop it on the floor.
*/ if (!ether_addr_equal(eh->h_source, fcf->fcf_mac)) {
LIBFCOE_FIP_DBG(fip, "Dropping CVL due to source address " "mismatch with FCF src=%pM\n", eh->h_source); return;
}
/* * If we haven't logged into the fabric but receive a CVL we should * reset everything and go back to solicitation.
*/ if (!lport->port_id) {
LIBFCOE_FIP_DBG(fip, "lport not logged in, resoliciting\n");
mutex_lock(&fip->ctlr_mutex);
fcoe_ctlr_reset(fip);
mutex_unlock(&fip->ctlr_mutex);
fc_lport_reset(fip->lp);
fcoe_ctlr_solicit(fip, NULL); return;
}
/* * mask of required descriptors. Validating each one clears its bit.
*/
desc_mask = BIT(FIP_DT_MAC) | BIT(FIP_DT_NAME);
/* * Actually need to subtract 'sizeof(*mp) - sizeof(*wp)' from 'rlen' * before determining max Vx_Port descriptor but a buggy FCF could have * omitted either or both MAC Address and Name Identifier descriptors
*/
num_vlink_desc = rlen / sizeof(*vp); if (num_vlink_desc)
vlink_desc_arr = kmalloc_array(num_vlink_desc, sizeof(vp),
GFP_ATOMIC); if (!vlink_desc_arr) return;
num_vlink_desc = 0;
while (rlen >= sizeof(*desc)) {
dlen = desc->fip_dlen * FIP_BPW; if (dlen > rlen) goto err; /* Drop CVL if there are duplicate critical descriptors */ if ((desc->fip_dtype < 32) &&
(desc->fip_dtype != FIP_DT_VN_ID) &&
!(desc_mask & 1U << desc->fip_dtype)) {
LIBFCOE_FIP_DBG(fip, "Duplicate Critical " "Descriptors in FIP CVL\n"); goto err;
} switch (desc->fip_dtype) { case FIP_DT_MAC:
mp = (struct fip_mac_desc *)desc; if (dlen < sizeof(*mp)) goto err; if (!ether_addr_equal(mp->fd_mac, fcf->fcf_mac)) goto err;
desc_mask &= ~BIT(FIP_DT_MAC); break; case FIP_DT_NAME:
wp = (struct fip_wwn_desc *)desc; if (dlen < sizeof(*wp)) goto err; if (get_unaligned_be64(&wp->fd_wwn) != fcf->switch_name) goto err;
desc_mask &= ~BIT(FIP_DT_NAME); break; case FIP_DT_VN_ID:
vp = (struct fip_vn_desc *)desc; if (dlen < sizeof(*vp)) goto err;
vlink_desc_arr[num_vlink_desc++] = vp;
vn_port = fc_vport_id_lookup(lport,
ntoh24(vp->fd_fc_id)); if (vn_port && (vn_port == lport)) {
mutex_lock(&fip->ctlr_mutex);
this_cpu_inc(lport->stats->VLinkFailureCount);
fcoe_ctlr_reset(fip);
mutex_unlock(&fip->ctlr_mutex);
} break; default: /* standard says ignore unknown descriptors >= 128 */ if (desc->fip_dtype < FIP_DT_NON_CRITICAL) goto err; break;
}
desc = (struct fip_desc *)((char *)desc + dlen);
rlen -= dlen;
}
/* * reset only if all required descriptors were present and valid.
*/ if (desc_mask)
LIBFCOE_FIP_DBG(fip, "missing descriptors mask %x\n",
desc_mask); elseif (!num_vlink_desc) {
LIBFCOE_FIP_DBG(fip, "CVL: no Vx_Port descriptor found\n"); /* * No Vx_Port description. Clear all NPIV ports, * followed by physical port
*/
mutex_lock(&fip->ctlr_mutex);
this_cpu_inc(lport->stats->VLinkFailureCount);
fcoe_ctlr_reset(fip);
mutex_unlock(&fip->ctlr_mutex);
fc_lport_reset(fip->lp);
fcoe_ctlr_solicit(fip, NULL);
} else { int i;
LIBFCOE_FIP_DBG(fip, "performing Clear Virtual Link\n"); for (i = 0; i < num_vlink_desc; i++) {
vp = vlink_desc_arr[i];
vn_port = fc_vport_id_lookup(lport,
ntoh24(vp->fd_fc_id)); if (!vn_port) continue;
/* * 'port_id' is already validated, check MAC address and * wwpn
*/ if (!ether_addr_equal(fip->get_src_addr(vn_port),
vp->fd_mac) ||
get_unaligned_be64(&vp->fd_wwpn) !=
vn_port->wwpn) continue;
if (vn_port == lport) /* * Physical port, defer processing till all * listed NPIV ports are cleared
*/
reset_phys_port = 1; else/* NPIV port */
fc_lport_reset(vn_port);
}
if (reset_phys_port) {
fc_lport_reset(fip->lp);
fcoe_ctlr_solicit(fip, NULL);
}
}
err:
kfree(vlink_desc_arr);
}
/**
 * fcoe_ctlr_recv() - Receive a FIP packet
 * @fip: The FCoE controller that received the packet
 * @skb: The received FIP packet
 *
 * This may be called from either NET_RX_SOFTIRQ or IRQ, so the actual
 * processing is deferred to the controller's receive work item.
 */
void fcoe_ctlr_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
{
	struct sk_buff *frame;

	/* Get a private copy if the skb is shared; drop on failure. */
	frame = skb_share_check(skb, GFP_ATOMIC);
	if (!frame)
		return;

	skb_queue_tail(&fip->fip_recv_list, frame);
	schedule_work(&fip->recv_work);
}
EXPORT_SYMBOL(fcoe_ctlr_recv);
/** * fcoe_ctlr_recv_handler() - Receive a FIP frame * @fip: The FCoE controller that received the frame * @skb: The received FIP frame * * Returns non-zero if the frame is dropped.
*/ staticint fcoe_ctlr_recv_handler(struct fcoe_ctlr *fip, struct sk_buff *skb)
{ struct fip_header *fiph; struct ethhdr *eh; enum fip_state state; bool fip_vlan_resp = false;
u16 op;
u8 sub;
if (skb_linearize(skb)) goto drop; if (skb->len < sizeof(*fiph)) goto drop;
eh = eth_hdr(skb); if (fip->mode == FIP_MODE_VN2VN) { if (!ether_addr_equal(eh->h_dest, fip->ctl_src_addr) &&
!ether_addr_equal(eh->h_dest, fcoe_all_vn2vn) &&
!ether_addr_equal(eh->h_dest, fcoe_all_p2p)) goto drop;
} elseif (!ether_addr_equal(eh->h_dest, fip->ctl_src_addr) &&
!ether_addr_equal(eh->h_dest, fcoe_all_enode)) goto drop;
fiph = (struct fip_header *)skb->data;
op = ntohs(fiph->fip_op);
sub = fiph->fip_subcode;
if (FIP_VER_DECAPS(fiph->fip_ver) != FIP_VER) goto drop; if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len) goto drop;
mutex_lock(&fip->ctlr_mutex);
state = fip->state; if (state == FIP_ST_AUTO) {
fip->map_dest = 0;
fcoe_ctlr_set_state(fip, FIP_ST_ENABLED);
state = FIP_ST_ENABLED;
LIBFCOE_FIP_DBG(fip, "Using FIP mode\n");
}
fip_vlan_resp = fip->fip_resp;
mutex_unlock(&fip->ctlr_mutex);
if (fip->mode == FIP_MODE_VN2VN && op == FIP_OP_VN2VN) return fcoe_ctlr_vn_recv(fip, skb);
if (fip_vlan_resp && op == FIP_OP_VLAN) {
LIBFCOE_FIP_DBG(fip, "fip vlan discovery\n"); return fcoe_ctlr_vlan_recv(fip, skb);
}
if (state != FIP_ST_ENABLED && state != FIP_ST_VNMP_UP &&
state != FIP_ST_VNMP_CLAIM) goto drop;
if (op == FIP_OP_DISC && sub == FIP_SC_ADV)
fcoe_ctlr_recv_adv(fip, skb); elseif (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK)
fcoe_ctlr_recv_clr_vlink(fip, skb);
kfree_skb(skb); return 0;
drop:
kfree_skb(skb); return -1;
}
/** * fcoe_ctlr_select() - Select the best FCF (if possible) * @fip: The FCoE controller * * Returns the selected FCF, or NULL if none are usable. * * If there are conflicting advertisements, no FCF can be chosen. * * If there is already a selected FCF, this will choose a better one or * an equivalent one that hasn't already been sent a FLOGI. * * Called with lock held.
*/ staticstruct fcoe_fcf *fcoe_ctlr_select(struct fcoe_ctlr *fip)
{ struct fcoe_fcf *fcf; struct fcoe_fcf *best = fip->sel_fcf;
list_for_each_entry(fcf, &fip->fcfs, list) {
LIBFCOE_FIP_DBG(fip, "consider FCF fab %16.16llx " "VFID %d mac %pM map %x val %d " "sent %u pri %u\n",
fcf->fabric_name, fcf->vfid, fcf->fcf_mac,
fcf->fc_map, fcoe_ctlr_mtu_valid(fcf),
fcf->flogi_sent, fcf->pri); if (!fcoe_ctlr_fcf_usable(fcf)) {
LIBFCOE_FIP_DBG(fip, "FCF for fab %16.16llx " "map %x %svalid %savailable\n",
fcf->fabric_name, fcf->fc_map,
(fcf->flags & FIP_FL_SOL) ? "" : "in",
(fcf->flags & FIP_FL_AVAIL) ? "" : "un"); continue;
} if (!best || fcf->pri < best->pri || best->flogi_sent)
best = fcf; if (fcf->fabric_name != best->fabric_name ||
fcf->vfid != best->vfid ||
fcf->fc_map != best->fc_map) {
LIBFCOE_FIP_DBG(fip, "Conflicting fabric, VFID, " "or FC-MAP\n"); return NULL;
}
}
fip->sel_fcf = best; if (best) {
LIBFCOE_FIP_DBG(fip, "using FCF mac %pM\n", best->fcf_mac);
fip->port_ka_time = jiffies +
msecs_to_jiffies(FIP_VN_KA_PERIOD);
fip->ctlr_ka_time = jiffies + best->fka_period; if (time_before(fip->ctlr_ka_time, fip->timer.expires))
mod_timer(&fip->timer, fip->ctlr_ka_time);
} return best;
}
/**
 * fcoe_ctlr_flogi_send_locked() - send FIP-encapsulated FLOGI to current FCF
 * @fip: The FCoE controller
 *
 * Returns non-zero error if it could not be sent.
 *
 * Called with ctlr_mutex and ctlr_lock held.
 * Caller must verify that fip->sel_fcf is not NULL.
 */
static int fcoe_ctlr_flogi_send_locked(struct fcoe_ctlr *fip)
{
	struct sk_buff *skb_orig;
	struct sk_buff *skb;
	struct fc_frame_header *fh;
	int error;

	skb_orig = fip->flogi_req;
	if (!skb_orig)
		return -EINVAL;

	/*
	 * Clone and send the FLOGI request.  If the clone fails, consume
	 * the saved original instead and forget it so it isn't reused.
	 */
	skb = skb_clone(skb_orig, GFP_ATOMIC);
	if (!skb) {
		skb = skb_orig;
		fip->flogi_req = NULL;
	}

	fh = (struct fc_frame_header *)skb->data;
	error = fcoe_ctlr_encaps(fip, fip->lp, FIP_DT_FLOGI, skb,
				 ntoh24(fh->fh_d_id));
	if (error) {
		kfree_skb(skb);
		return error;
	}

	fip->send(fip, skb);
	fip->sel_fcf->flogi_sent = 1;
	return 0;
}
/** * fcoe_ctlr_flogi_retry() - resend FLOGI request to a new FCF if possible * @fip: The FCoE controller * * Returns non-zero error code if there's no FLOGI request to retry or * no alternate FCF available.
*/ staticint fcoe_ctlr_flogi_retry(struct fcoe_ctlr *fip)
{ struct fcoe_fcf *fcf; int error;
/**
 * fcoe_ctlr_flogi_send() - Handle sending of FIP FLOGI.
 * @fip: The FCoE controller that timed out
 *
 * Done here because fcoe_ctlr_els_send() can't get mutex.
 *
 * Called with ctlr_mutex held.  The caller must not hold ctlr_lock.
 */
static void fcoe_ctlr_flogi_send(struct fcoe_ctlr *fip)
{
	struct fcoe_fcf *fcf;

	spin_lock_bh(&fip->ctlr_lock);
	fcf = fip->sel_fcf;
	if (!fcf || !fip->flogi_req_send)
		goto unlock;

	LIBFCOE_FIP_DBG(fip, "sending FLOGI\n");

	/*
	 * If this FLOGI is being sent due to a timeout retry
	 * to the same FCF as before, select a different FCF if possible.
	 */
	if (fcf->flogi_sent) {
		LIBFCOE_FIP_DBG(fip, "sending FLOGI - reselect\n");
		fcf = fcoe_ctlr_select(fip);
		if (!fcf || fcf->flogi_sent) {
			/* Everyone has been tried; reset and start over. */
			LIBFCOE_FIP_DBG(fip, "sending FLOGI - clearing\n");
			list_for_each_entry(fcf, &fip->fcfs, list)
				fcf->flogi_sent = 0;
			fcf = fcoe_ctlr_select(fip);
		}
	}

	if (fcf) {
		fcoe_ctlr_flogi_send_locked(fip);
		fip->flogi_req_send = 0;
	} else /* XXX */
		LIBFCOE_FIP_DBG(fip, "No FCF selected - defer send\n");
unlock:
	spin_unlock_bh(&fip->ctlr_lock);
}
/**
 * fcoe_ctlr_timeout() - FIP timeout handler
 * @t: Timer context use to obtain the controller reference
 *
 * Runs in timer (softirq) context, so all real work is deferred to the
 * controller's timer work item.
 */
static void fcoe_ctlr_timeout(struct timer_list *t)
{
	struct fcoe_ctlr *fip = timer_container_of(fip, t, timer);

	schedule_work(&fip->timer_work);
}
/**
 * fcoe_ctlr_timer_work() - Worker thread function for timer work
 * @work: Handle to a FCoE controller
 *
 * Ages FCFs.  Triggers FCF selection if possible.
 * Sends keep-alives and resets.
 */
static void fcoe_ctlr_timer_work(struct work_struct *work)
{
	struct fcoe_ctlr *fip;
	struct fc_lport *vport;
	u8 *mac;
	u8 reset = 0;
	u8 send_ctlr_ka = 0;
	u8 send_port_ka = 0;
	struct fcoe_fcf *sel;
	struct fcoe_fcf *fcf;
	unsigned long next_timer;
	/*
	 * NOTE(review): the remainder of this function body is not visible
	 * in this chunk — the extraction appears to have dropped lines.
	 */
/**
 * fcoe_ctlr_recv_flogi() - Snoop pre-FIP receipt of FLOGI response
 * @fip: The FCoE controller
 * @lport: The local port
 * @fp: The FC frame to snoop
 *
 * Snoop potential response to FLOGI or even incoming FLOGI.
 *
 * The caller has checked that we are waiting for login as indicated
 * by fip->flogi_oxid != FC_XID_UNKNOWN.
 *
 * The caller is responsible for freeing the frame.
 * Fill in the granted_mac address.
 *
 * Return non-zero if the frame should not be delivered to libfc.
 */
int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *fip, struct fc_lport *lport,
			 struct fc_frame *fp)
{
	struct fc_frame_header *fh;
	u8 op;
	u8 *sa;

	sa = eth_hdr(&fp->skb)->h_source;
	fh = fc_frame_header_get(fp);
	/* Only ELS frames are of interest here. */
	if (fh->fh_type != FC_TYPE_ELS)
		return 0;
	op = fc_frame_payload_op(fp);
	if (op == ELS_LS_ACC && fh->fh_r_ctl == FC_RCTL_ELS_REP &&
	    fip->flogi_oxid == ntohs(fh->fh_ox_id)) {
		/*
		 * FLOGI accepted.
		 * If the src mac addr is FC_OUI-based, then we mark the
		 * address_mode flag to use FC_OUI-based Ethernet DA.
		 * Otherwise we use the FCoE gateway addr
		 */
		if (ether_addr_equal(sa, (u8[6])FC_FCOE_FLOGI_MAC)) {
			fcoe_ctlr_map_dest(fip);
		} else {
			memcpy(fip->dest_addr, sa, ETH_ALEN);
			fip->map_dest = 0;
		}
		fip->flogi_oxid = FC_XID_UNKNOWN;
		/*
		 * NOTE(review): this unlock has no visible matching
		 * mutex_lock in this branch — a lock line may have been
		 * lost in extraction; verify against the upstream source
		 * before relying on this locking pattern.
		 */
		mutex_unlock(&fip->ctlr_mutex);
		fc_fcoe_set_mac(fr_cb(fp)->granted_mac, fh->fh_d_id);
	} else if (op == ELS_FLOGI && fh->fh_r_ctl == FC_RCTL_ELS_REQ && sa) {
		/*
		 * Save source MAC for point-to-point responses.
		 */
		mutex_lock(&fip->ctlr_mutex);
		if (fip->state == FIP_ST_AUTO || fip->state == FIP_ST_NON_FIP) {
			memcpy(fip->dest_addr, sa, ETH_ALEN);
			fip->map_dest = 0;
			if (fip->state == FIP_ST_AUTO)
				LIBFCOE_FIP_DBG(fip, "received non-FIP FLOGI. "
						"Setting non-FIP mode\n");
			/* A non-FIP FLOGI was seen: drop out of FIP mode. */
			fcoe_ctlr_set_state(fip, FIP_ST_NON_FIP);
		}
		mutex_unlock(&fip->ctlr_mutex);
	}
	return 0;
}
EXPORT_SYMBOL(fcoe_ctlr_recv_flogi);
/** * fcoe_wwn_from_mac() - Converts a 48-bit IEEE MAC address to a 64-bit FC WWN * @mac: The MAC address to convert * @scheme: The scheme to use when converting * @port: The port indicator for converting * * Returns: u64 fc world wide name
*/
u64 fcoe_wwn_from_mac(unsignedchar mac[ETH_ALEN], unsignedint scheme, unsignedint port)
{
u64 wwn;
u64 host_mac;
/* The MAC is in NO, so flip only the low 48 bits */
host_mac = ((u64) mac[0] << 40) |
((u64) mac[1] << 32) |
((u64) mac[2] << 24) |
((u64) mac[3] << 16) |
((u64) mac[4] << 8) |
(u64) mac[5];
/**
 * fcoe_ctlr_vn_rport_callback - Event handler for rport events.
 * @lport: The lport which is receiving the event
 * @rdata: remote port private data
 * @event: The event that occurred
 *
 * Locking Note: The rport lock must not be held when calling this function.
 */
static void fcoe_ctlr_vn_rport_callback(struct fc_lport *lport,
					struct fc_rport_priv *rdata,
					enum fc_rport_event event)
{
	struct fcoe_ctlr *fip = lport->disc.priv;
	struct fcoe_rport *frport = fcoe_ctlr_rport(rdata);
	/*
	 * NOTE(review): the remainder of this function body is not visible
	 * in this chunk — the extraction appears to have dropped lines.
	 */
/**
 * fcoe_ctlr_disc_stop_locked() - stop discovery in VN2VN mode
 * @lport: The local port
 *
 * Called with ctlr_mutex held.
 */
static void fcoe_ctlr_disc_stop_locked(struct fc_lport *lport)
{
	struct fc_rport_priv *rdata;
	/*
	 * NOTE(review): the remainder of this function body is not visible
	 * in this chunk — the extraction appears to have dropped lines.
	 */
/**
 * fcoe_ctlr_disc_stop() - stop discovery in VN2VN mode
 * @lport: The local port
 *
 * Called through the local port template for discovery.
 * Called without the ctlr_mutex held.
 */
static void fcoe_ctlr_disc_stop(struct fc_lport *lport)
{
	struct fcoe_ctlr *fip = lport->disc.priv;
	/*
	 * NOTE(review): the remainder of this function body is not visible
	 * in this chunk — the extraction appears to have dropped lines.
	 */
/**
 * fcoe_ctlr_disc_stop_final() - stop discovery for shutdown in VN2VN mode
 * @lport: The local port
 *
 * Called through the local port template for discovery.
 * Called without the ctlr_mutex held.
 */
static void fcoe_ctlr_disc_stop_final(struct fc_lport *lport)
{
	fcoe_ctlr_disc_stop(lport);
	/* Drain any queued rport work before waiting out RCU readers. */
	fc_rport_flush_queue();
	synchronize_rcu();
}
/**
 * fcoe_ctlr_vn_restart() - VN2VN probe restart with new port_id
 * @fip: The FCoE controller
 *
 * Called with fcoe_ctlr lock held.
 */
static void fcoe_ctlr_vn_restart(struct fcoe_ctlr *fip)
{
	unsigned long wait;
	u32 port_id;

	fcoe_ctlr_disc_stop_locked(fip->lp);

	/*
	 * Get proposed port ID.
	 * If this is the first try after link up, use any previous port_id.
	 * If there was none, use the low bits of the port_name.
	 * On subsequent tries, get the next random one.
	 * Don't use reserved IDs, use another non-zero value, just as random.
	 */
	port_id = fip->port_id;
	if (fip->probe_tries)
		port_id = prandom_u32_state(&fip->rnd_state) & 0xffff;
	else if (!port_id)
		port_id = fip->lp->wwpn & 0xffff;
	/* 0 and 0xffff are reserved port IDs; substitute a valid one. */
	if (!port_id || port_id == 0xffff)
		port_id = 1;
	fip->port_id = port_id;
	/*
	 * NOTE(review): the remainder of this function body is not visible
	 * in this chunk — the extraction appears to have dropped lines.
	 */