/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2009-2012 Cavium, Inc
*/
#define DRV_NAME "octeon_mgmt" #define DRV_DESCRIPTION \ "Cavium Networks Octeon MII (management) port Network Driver"
#define OCTEON_MGMT_NAPI_WEIGHT 16
/* Ring sizes that are powers of two allow for more efficient modulo * operations.
*/ #define OCTEON_MGMT_RX_RING_SIZE 512 #define OCTEON_MGMT_TX_RING_SIZE 128
/* Allow 8 bytes for vlan and FCS. */ #define OCTEON_MGMT_RX_HEADROOM (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN)
/* Hardware descriptor format shared by the RX and TX rings.  Each ring
 * entry packs the DMA buffer address, the buffer/packet length, a
 * completion/error code and a timestamp-request flag into one 64-bit
 * word, accessible either as a raw u64 or as bitfields.
 */
union mgmt_port_ring_entry {
	u64 d64;
	struct {
/* Values seen in the `code` field on RX completion. */
#define RING_ENTRY_CODE_DONE 0xf
#define RING_ENTRY_CODE_MORE 0x10
#ifdef __BIG_ENDIAN_BITFIELD
		u64 reserved_62_63:2;
		/* Length of the buffer/packet in bytes */
		u64 len:14;
		/* For TX, signals that the packet should be timestamped */
		u64 tstamp:1;
		/* The RX error code */
		u64 code:7;
		/* Physical address of the buffer */
		u64 addr:40;
#else
		/* Same fields, reversed for little-endian bitfield layout. */
		u64 addr:40;
		u64 code:7;
		u64 tstamp:1;
		u64 len:14;
		u64 reserved_62_63:2;
#endif
	} s;
};
/* NOTE(review): fragment — the enclosing function (apparently the TX
 * cleanup path; it uses `re`, `skb` and `p` declared above this
 * excerpt) is not visible here; only this timestamp handling survives.
 */
	/* Read the hardware TX timestamp if one was recorded */
	if (unlikely(re.s.tstamp)) {
		struct skb_shared_hwtstamps ts;
		u64 ns;

		memset(&ts, 0, sizeof(ts));
		/* Read the timestamp */
		ns = cvmx_read_csr(CVMX_MIXX_TSTAMP(p->port));
		/* Remove the timestamp from the FIFO */
		cvmx_write_csr(CVMX_MIXX_TSCTL(p->port), 0);
		/* Tell the kernel about the timestamp */
		ts.hwtstamp = ns_to_ktime(ns);
		skb_tstamp_tx(skb, &ts);
	}
/* NOTE(review): fragment — tail of the RX statistics updater; the
 * function header and the declarations of `drop`, `bad`, `flags`,
 * `netdev` and `p` are not visible in this excerpt.
 */
	/* These reads also clear the count registers. */
	drop = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP);
	bad = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD);

	if (drop || bad) {
		/* Do an atomic update. */
		spin_lock_irqsave(&p->lock, flags);
		netdev->stats.rx_errors += bad;
		netdev->stats.rx_dropped += drop;
		spin_unlock_irqrestore(&p->lock, flags);
	}
}
/* Dequeue a receive skb and its corresponding ring entry.  The ring
 * entry is returned, *pskb is updated to point to the skb.
 *
 * NOTE(review): truncated — only the opening of this function is
 * visible in this excerpt; the body that actually pops the ring entry
 * and skb is missing.
 */
static u64 octeon_mgmt_dequeue_rx_buffer(struct octeon_mgmt *p,
					 struct sk_buff **pskb)
{
	union mgmt_port_ring_entry re;
staticint octeon_mgmt_receive_one(struct octeon_mgmt *p)
{ struct net_device *netdev = p->netdev; union cvmx_mixx_ircnt mix_ircnt; union mgmt_port_ring_entry re; struct sk_buff *skb; struct sk_buff *skb2; struct sk_buff *skb_new; union mgmt_port_ring_entry re2; int rc = 1;
re.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb); if (likely(re.s.code == RING_ENTRY_CODE_DONE)) { /* A good packet, send it up. */
skb_put(skb, re.s.len);
good: /* Process the RX timestamp if it was recorded */ if (p->has_rx_tstamp) { /* The first 8 bytes are the timestamp */
u64 ns = *(u64 *)skb->data; struct skb_shared_hwtstamps *ts;
ts = skb_hwtstamps(skb);
ts->hwtstamp = ns_to_ktime(ns);
__skb_pull(skb, 8);
}
skb->protocol = eth_type_trans(skb, netdev);
netdev->stats.rx_packets++;
netdev->stats.rx_bytes += skb->len;
netif_receive_skb(skb);
rc = 0;
} elseif (re.s.code == RING_ENTRY_CODE_MORE) { /* Packet split across skbs. This can happen if we * increase the MTU. Buffers that are already in the * rx ring can then end up being too small. As the rx * ring is refilled, buffers sized for the new MTU * will be used and we should go back to the normal * non-split case.
*/
skb_put(skb, re.s.len); do {
re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2); if (re2.s.code != RING_ENTRY_CODE_MORE
&& re2.s.code != RING_ENTRY_CODE_DONE) goto split_error;
skb_put(skb2, re2.s.len);
skb_new = skb_copy_expand(skb, 0, skb2->len,
GFP_ATOMIC); if (!skb_new) goto split_error; if (skb_copy_bits(skb2, 0, skb_tail_pointer(skb_new),
skb2->len)) goto split_error;
skb_put(skb_new, skb2->len);
dev_kfree_skb_any(skb);
dev_kfree_skb_any(skb2);
skb = skb_new;
} while (re2.s.code == RING_ENTRY_CODE_MORE); goto good;
} else { /* Some other error, discard it. */
dev_kfree_skb_any(skb); /* Error statistics are accumulated in * octeon_mgmt_update_rx_stats.
*/
} goto done;
split_error: /* Discard the whole mess. */
dev_kfree_skb_any(skb);
dev_kfree_skb_any(skb2); while (re2.s.code == RING_ENTRY_CODE_MORE) {
re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
dev_kfree_skb_any(skb2);
}
netdev->stats.rx_errors++;
done: /* Tell the hardware we processed a packet. */
mix_ircnt.u64 = 0;
mix_ircnt.s.ircnt = 1;
cvmx_write_csr(p->mix + MIX_IRCNT, mix_ircnt.u64); return rc;
}
/* NOTE(review): corrupted/truncated — this is the declaration of
 * octeon_mgmt_receive_packets() ("staticint"/"unsignedint" are mangled
 * keyword pairs) with its receive loop missing; the code below that
 * references `napi` and `netdev` appears to be the tail of the NAPI
 * poll handler, not of this function.  Recover the missing middle
 * before relying on this block.
 */
staticint octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget)
{
	unsignedint work_done = 0;
	union cvmx_mixx_ircnt mix_ircnt;
	int rc;

	if (work_done < budget) {
		/* We stopped because no more packets were available. */
		napi_complete_done(napi, work_done);
		octeon_mgmt_enable_rx_irq(p);
	}
	octeon_mgmt_update_rx_stats(netdev);
	return work_done;
}
/* Reset the hardware to clean state. */
/* NOTE(review): truncated — only the local declarations survive here
 * ("staticvoid" is a mangled "static void"); the reset sequence that
 * uses mix_ctl/mix_bist/agl_gmx_bist is missing from this excerpt.
 */
staticvoid octeon_mgmt_reset_hw(struct octeon_mgmt *p)
{
	union cvmx_mixx_ctl mix_ctl;
	union cvmx_mixx_bist mix_bist;
	union cvmx_agl_gmx_bist agl_gmx_bist;
/* ndo_set_mac_address handler: validate and store the new MAC via
 * eth_mac_addr(), then reprogram the hardware RX address filter.
 *
 * Returns 0 on success, or the negative errno from eth_mac_addr()
 * (e.g. -EADDRNOTAVAIL for an invalid address).
 *
 * Fixes the mangled keyword "staticint" -> "static int".
 */
static int octeon_mgmt_set_mac_address(struct net_device *netdev, void *addr)
{
	int r = eth_mac_addr(netdev, addr);

	if (r)
		return r;

	/* Push the new address into the hardware filter. */
	octeon_mgmt_set_rx_filtering(netdev);

	return 0;
}
/* ndo_change_mtu handler: record the new MTU on the netdev and program
 * the AGL maximum-frame and jabber limits to match.
 *
 * Fixes the mangled keyword "staticint" -> "static int" and restores
 * the missing "return 0;" and closing brace — ndo_change_mtu returns
 * an int, and 0 signals success to the caller.
 */
static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	int max_packet = new_mtu + ETH_HLEN + ETH_FCS_LEN;

	WRITE_ONCE(netdev->mtu, new_mtu);

	/* HW lifts the limit if the frame is VLAN tagged
	 * (+4 bytes per each tag, up to two tags)
	 */
	cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_MAX, max_packet);
	/* Set the hardware to truncate packets larger than the MTU. The jabber
	 * register must be set to a multiple of 8 bytes, so round up. JABBER is
	 * an unconditional limit, so we need to account for two possible VLAN
	 * tags.
	 */
	cvmx_write_csr(p->agl + AGL_GMX_RX_JABBER,
		       (max_packet + 7 + VLAN_HLEN * 2) & 0xfff8);

	return 0;
}
	/* NOTE(review): fragment — tail of the MIX interrupt handler;
	 * the function header and the read/acknowledge of `mixx_isr`
	 * are not visible in this excerpt.
	 */
	if (mixx_isr.s.irthresh) {
		/* RX threshold hit: mask RX interrupts, let NAPI poll. */
		octeon_mgmt_disable_rx_irq(p);
		napi_schedule(&p->napi);
	}
	if (mixx_isr.s.orthresh) {
		/* TX threshold hit: mask TX interrupts, clean via tasklet. */
		octeon_mgmt_disable_tx_irq(p);
		tasklet_schedule(&p->tx_clean_tasklet);
	}

	return IRQ_HANDLED;
}
staticint octeon_mgmt_ioctl_hwtstamp(struct net_device *netdev, struct ifreq *rq, int cmd)
{ struct octeon_mgmt *p = netdev_priv(netdev); struct hwtstamp_config config; union cvmx_mio_ptp_clock_cfg ptp; union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl; bool have_hw_timestamps = false;
if (copy_from_user(&config, rq->ifr_data, sizeof(config))) return -EFAULT;
/* Check the status of hardware for tiemstamps */ if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) { /* Get the current state of the PTP clock */
ptp.u64 = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_CFG); if (!ptp.s.ext_clk_en) { /* The clock has not been configured to use an * external source. Program it to use the main clock * reference.
*/
u64 clock_comp = (NSEC_PER_SEC << 32) / octeon_get_io_clock_rate(); if (!ptp.s.ptp_en)
cvmx_write_csr(CVMX_MIO_PTP_CLOCK_COMP, clock_comp);
netdev_info(netdev, "PTP Clock using sclk reference @ %lldHz\n",
(NSEC_PER_SEC << 32) / clock_comp);
} else { /* The clock is already programmed to use a GPIO */
u64 clock_comp = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_COMP);
netdev_info(netdev, "PTP Clock using GPIO%d @ %lld Hz\n",
ptp.s.ext_clk_in, (NSEC_PER_SEC << 32) / clock_comp);
}
/* Enable the clock if it wasn't done already */ if (!ptp.s.ptp_en) {
ptp.s.ptp_en = 1;
cvmx_write_csr(CVMX_MIO_PTP_CLOCK_CFG, ptp.u64);
}
have_hw_timestamps = true;
}
if (!have_hw_timestamps) return -EINVAL;
switch (config.tx_type) { case HWTSTAMP_TX_OFF: case HWTSTAMP_TX_ON: break; default: return -ERANGE;
}
switch (config.rx_filter) { case HWTSTAMP_FILTER_NONE:
p->has_rx_tstamp = false;
rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL);
rxx_frm_ctl.s.ptp_mode = 0;
cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64); break; case HWTSTAMP_FILTER_ALL: case HWTSTAMP_FILTER_SOME: case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: case HWTSTAMP_FILTER_PTP_V2_EVENT: case HWTSTAMP_FILTER_PTP_V2_SYNC: case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: case HWTSTAMP_FILTER_NTP_ALL:
p->has_rx_tstamp = have_hw_timestamps;
config.rx_filter = HWTSTAMP_FILTER_ALL; if (p->has_rx_tstamp) {
rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL);
rxx_frm_ctl.s.ptp_mode = 1;
cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
} break; default: return -ERANGE;
}
if (copy_to_user(rq->ifr_data, &config, sizeof(config))) return -EFAULT;
/* Disable the AGL GMX port (overall, TX and RX enables) before
 * reconfiguration; on CN6XXX additionally poll briefly for the
 * datapaths to report idle.
 *
 * Fixes the mangled keyword "staticvoid" -> "static void".
 */
static void octeon_mgmt_disable_link(struct octeon_mgmt *p)
{
	union cvmx_agl_gmx_prtx_cfg prtx_cfg;

	/* Disable GMX before we make any changes. */
	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
	prtx_cfg.s.en = 0;
	prtx_cfg.s.tx_en = 0;
	prtx_cfg.s.rx_en = 0;
	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);

	if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
		int i;

		for (i = 0; i < 10; i++) {
			prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
			if (prtx_cfg.s.tx_idle == 1 || prtx_cfg.s.rx_idle == 1)
				break;
			mdelay(1);
			/* NOTE(review): `i` is also advanced by the for
			 * statement, so the loop polls at most ~5 times
			 * (~5 ms) — confirm the double increment is
			 * intentional before "fixing" it.
			 */
			i++;
		}
	}
}
/* Re-enable the AGL GMX port (TX, RX and overall enables) after link
 * reconfiguration.
 *
 * Fixes the mangled keyword "staticvoid" -> "static void".
 */
static void octeon_mgmt_enable_link(struct octeon_mgmt *p)
{
	union cvmx_agl_gmx_prtx_cfg prtx_cfg;

	/* Restore the GMX enable state only if link is set */
	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
	prtx_cfg.s.tx_en = 1;
	prtx_cfg.s.rx_en = 1;
	prtx_cfg.s.en = 1;
	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
}
/* NOTE(review): fragment — middle of the link-update helper's switch
 * on the negotiated speed.  The function header, the switch statement
 * itself and the "case 10:" arm are not visible here, and the excerpt
 * also ends mid-block (the CN6XXX clock programming that uses
 * agl_clk/prtx_ctl is cut off).
 */
	if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
		prtx_cfg.s.burst = 1;
		prtx_cfg.s.speed_msb = 1;
	}
	break;
case 100:
	prtx_cfg.s.speed = 0;
	prtx_cfg.s.slottime = 0;

	if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
		prtx_cfg.s.burst = 1;
		prtx_cfg.s.speed_msb = 0;
	}
	break;
case 1000:
	/* 1000 MBits is only supported on 6XXX chips */
	if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
		prtx_cfg.s.speed = 1;
		prtx_cfg.s.speed_msb = 0;
		/* Only matters for half-duplex */
		prtx_cfg.s.slottime = 1;
		prtx_cfg.s.burst = phydev->duplex;
	}
	break;
case 0:
	/* No link */
default:
	break;
}

/* Write the new GMX setting with the port still disabled. */
cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);

/* Read GMX CFG again to make sure the config is completed. */
prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);

if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
	union cvmx_agl_gmx_txx_clk agl_clk;
	union cvmx_agl_prtx_ctl prtx_ctl;
/* ndo_open handler for the management port.
 *
 * NOTE(review): corrupted/truncated — several chunks of this function
 * are missing from the excerpt ("staticint" is a mangled
 * "static int"): the ring allocation and the MIX_CTL read that must
 * precede the "Bring it out of reset" test, the DMA ring programming
 * that uses oring1/iring1, the body of the CN56XX/CN52XX
 * compensation-value block (its opening brace is never closed here),
 * the interrupt/watermark setup using mix_irhwm/mix_orhwm/mix_intena,
 * the use of `sa`, the err_noirq error path, and the final return.
 * Braces therefore do not balance in this excerpt.
 */
staticint octeon_mgmt_open(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	union cvmx_mixx_ctl mix_ctl;
	union cvmx_agl_gmx_inf_mode agl_gmx_inf_mode;
	union cvmx_mixx_oring1 oring1;
	union cvmx_mixx_iring1 iring1;
	union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
	union cvmx_mixx_irhwm mix_irhwm;
	union cvmx_mixx_orhwm mix_orhwm;
	union cvmx_mixx_intena mix_intena;
	struct sockaddr sa;

	/* Bring it out of reset if needed. */
	if (mix_ctl.s.reset) {
		mix_ctl.s.reset = 0;
		cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
		do {
			/* Spin until the hardware clears the reset bit. */
			mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
		} while (mix_ctl.s.reset);
	}

	if (OCTEON_IS_MODEL(OCTEON_CN5XXX)) {
		agl_gmx_inf_mode.u64 = 0;
		agl_gmx_inf_mode.s.en = 1;
		cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);
	}
	if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
		|| OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
		/* Force compensation values, as they are not
		 * determined properly by HW
		 */
		union cvmx_agl_gmx_drv_ctl drv_ctl;

	/* Enable the port HW. Packets are not allowed until
	 * cvmx_mgmt_port_enable() is called.
	 */
	mix_ctl.u64 = 0;
	mix_ctl.s.crc_strip = 1;	/* Strip the ending CRC */
	mix_ctl.s.en = 1;		/* Enable the port */
	mix_ctl.s.nbtarb = 0;		/* Arbitration mode */
	/* MII CB-request FIFO programmable high watermark */
	mix_ctl.s.mrq_hwm = 1;
#ifdef __LITTLE_ENDIAN
	mix_ctl.s.lendian = 1;
#endif
	cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);

	/* Read the PHY to find the mode of the interface. */
	if (octeon_mgmt_init_phy(netdev)) {
		dev_err(p->dev, "Cannot initialize PHY on MIX%d.\n", p->port);
		goto err_noirq;
	}

	/* Set the mode of the interface, RGMII/MII. */
	if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && netdev->phydev) {
		union cvmx_agl_prtx_ctl agl_prtx_ctl;
		int rgmii_mode =
			(linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
					   netdev->phydev->supported) |
			 linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
					   netdev->phydev->supported)) != 0;

		/* MII clocks counts are based on the 125Mhz
		 * reference, which has an 8nS period. So our delays
		 * need to be multiplied by this factor.
		 */
#define NS_PER_PHY_CLK 8

		/* Take the DLL and clock tree out of reset */
		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
		agl_prtx_ctl.s.clkrst = 0;
		if (rgmii_mode) {
			agl_prtx_ctl.s.dllrst = 0;
			agl_prtx_ctl.s.clktx_byp = 0;
		}
		cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);
		cvmx_read_csr(p->agl_prt_ctl); /* Force write out before wait */

		/* Wait for the DLL to lock. External 125 MHz
		 * reference clock must be stable at this point.
		 */
		ndelay(256 * NS_PER_PHY_CLK);

	rxx_frm_ctl.u64 = 0;
	rxx_frm_ctl.s.ptp_mode = p->has_rx_tstamp ? 1 : 0;
	rxx_frm_ctl.s.pre_align = 1;
	/* When set, disables the length check for non-min sized pkts
	 * with padding in the client data.
	 */
	rxx_frm_ctl.s.pad_len = 1;
	/* When set, disables the length check for VLAN pkts */
	rxx_frm_ctl.s.vlan_len = 1;
	/* When set, PREAMBLE checking is less strict */
	rxx_frm_ctl.s.pre_free = 1;
	/* Control Pause Frames can match station SMAC */
	rxx_frm_ctl.s.ctl_smac = 0;
	/* Control Pause Frames can match globally assign Multicast address */
	rxx_frm_ctl.s.ctl_mcst = 1;
	/* Forward pause information to TX block */
	rxx_frm_ctl.s.ctl_bck = 1;
	/* Drop Control Pause Frames */
	rxx_frm_ctl.s.ctl_drp = 1;
	/* Strip off the preamble */
	rxx_frm_ctl.s.pre_strp = 1;
	/* This port is configured to send PREAMBLE+SFD to begin every
	 * frame. GMX checks that the PREAMBLE is sent correctly.
	 */
	rxx_frm_ctl.s.pre_chk = 1;
	cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);

	/* Configure the port duplex, speed and enables */
	octeon_mgmt_disable_link(p);
	if (netdev->phydev)
		octeon_mgmt_update_link(p);
	octeon_mgmt_enable_link(p);

	p->last_link = 0;
	p->last_speed = 0;
	/* PHY is not present in simulator. The carrier is enabled
	 * while initializing the phy for simulator, leave it enabled.
	 */
	if (netdev->phydev) {
		netif_carrier_off(netdev);
		phy_start(netdev->phydev);
	}
/*
 * (Extraneous website boilerplate, not part of the driver — kept as a
 * comment so the file remains parseable.)
 *
 * Die Informationen auf dieser Webseite wurden nach bestem Wissen
 * sorgfältig zusammengestellt. Es wird jedoch weder die Vollständigkeit
 * noch die Richtigkeit noch die Qualität der bereitgestellten
 * Informationen zugesichert.
 *
 * Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 * experimentell.
 */