// SPDX-License-Identifier: GPL-2.0-only
/*
 * IPv4 over IEEE 1394, per RFC 2734
 * IPv6 over IEEE 1394, per RFC 3146
 *
 * Copyright (C) 2009 Jay Fenlason <fenlason@redhat.com>
 *
 * based on eth1394 by Ben Collins et al
 */
/* This list keeps track of what parts of the datagram have been filled in */
struct fwnet_fragment_info {
	struct list_head fi_link;	/* link in fwnet_partial_datagram.fi_list */
	u16 offset;			/* byte offset of this range within the datagram */
	u16 len;			/* length of this contiguous (possibly merged) range */
};
/*
 * Per-card driver state.
 *
 * NOTE(review): this span appears truncated by extraction — the struct is
 * never closed here, and later code references members (peer_list, netdev,
 * card) that are not visible in this chunk.  Left byte-identical.
 */
struct fwnet_device {
	struct list_head dev_link;
	/* serializes access to device state; see "caller must hold dev->lock"
	 * on the lookup helpers below */
	spinlock_t lock;
	enum {
		FWNET_BROADCAST_ERROR,
		FWNET_BROADCAST_RUNNING,
		FWNET_BROADCAST_STOPPED,
	} broadcast_state;
	struct fw_iso_context *broadcast_rcv_context;
	struct fw_iso_buffer broadcast_rcv_buffer;
	void **broadcast_rcv_buffer_ptrs;
	unsigned broadcast_rcv_next_ptr;
	unsigned num_broadcast_rcv_ptrs;
	unsigned rcv_buffer_size;
	/*
	 * This value is the maximum unfragmented datagram size that can be
	 * sent by the hardware.  It already has the GASP overhead and the
	 * unfragmented datagram header overhead calculated into it.
	 */
	unsigned broadcast_xmt_max_payload;
	u16 broadcast_xmt_datagramlabel;
	/*
	 * The CSR address that remote nodes must send datagrams to for us to
	 * receive them.
	 */
	struct fw_address_handler handler;
	u64 local_fifo;
	/* Number of tx datagrams that have been queued but not yet acked */
	int queued_datagrams;
/* FIXME: is this correct for all cases? */ staticbool fwnet_frag_overlap(struct fwnet_partial_datagram *pd, unsigned offset, unsigned len)
{ struct fwnet_fragment_info *fi; unsigned end = offset + len;
list_for_each_entry(fi, &pd->fi_list, fi_link) if (offset < fi->offset + fi->len && end > fi->offset) returntrue;
returnfalse;
}
/* Assumes that new fragment does not overlap any existing fragments */
/*
 * NOTE(review): "staticstruct" below is an extraction artifact for
 * "static struct".  This span is also corrupted: fwnet_frag_new()'s body
 * breaks off right after kfree(fi) and is fused with the tail of a
 * different function — the code suddenly uses 'peer' (never declared
 * here) and returns a datagram-completeness test, which cannot belong to
 * a function returning struct fwnet_fragment_info *.  Code left
 * byte-identical; recover the missing middle from the upstream source.
 */
staticstruct fwnet_fragment_info *fwnet_frag_new(
	struct fwnet_partial_datagram *pd, unsigned offset, unsigned len)
{
	struct fwnet_fragment_info *fi, *fi2, *new;
	struct list_head *list;

	list = &pd->fi_list;
	list_for_each_entry(fi, &pd->fi_list, fi_link) {
		if (fi->offset + fi->len == offset) {
			/* The new fragment can be tacked on to the end */
			/* Did the new fragment plug a hole? */
			fi2 = list_entry(fi->fi_link.next,
					 struct fwnet_fragment_info, fi_link);
			if (fi->offset + fi->len == fi2->offset) {
				/* glue fragments together */
				fi->len += len + fi2->len;
				list_del(&fi2->fi_link);
				kfree(fi2);
			} else {
				fi->len += len;
			}
			return fi;
		}
		if (offset + len == fi->offset) {
			/* The new fragment can be tacked on to the beginning */
			/* Did the new fragment plug a hole? */
			fi2 = list_entry(fi->fi_link.prev,
					 struct fwnet_fragment_info, fi_link);
			if (fi2->offset + fi2->len == fi->offset) {
				/* glue fragments together */
				fi2->len += fi->len + len;
				list_del(&fi->fi_link);
				kfree(fi);
	/* NOTE(review): splice point — the lines below belong to another
	 * function (apparently a partial-datagram update/completion check). */
	/*
	 * Move list entry to beginning of list so that oldest partial
	 * datagrams percolate to the end of the list
	 */
	list_move_tail(&pd->pd_link, &peer->pd_list);
	fi = list_entry(pd->fi_list.next, struct fwnet_fragment_info, fi_link);

	return fi->len == pd->datagram_size;
}
/* caller must hold dev->lock */ staticstruct fwnet_peer *fwnet_peer_find_by_guid(struct fwnet_device *dev,
u64 guid)
{ struct fwnet_peer *peer;
list_for_each_entry(peer, &dev->peer_list, peer_link) if (peer->guid == guid) return peer;
return NULL;
}
/* caller must hold dev->lock */
/*
 * NOTE(review): "staticstruct" is an extraction artifact for
 * "static struct", and the function body is missing from this span —
 * the text jumps straight into fwnet_incoming_packet() below.
 */
staticstruct fwnet_peer *fwnet_peer_find_by_node_id(struct fwnet_device *dev,
		int node_id, int generation)
{
	struct fwnet_peer *peer;
/*
 * Process one received datagram (or fragment) from the 1394 bus.
 *
 * NOTE(review): "staticint" and "unsignedlong" are extraction artifacts
 * for the space-separated keywords.  Large parts of this function are
 * missing: the braces do not balance, and the text jumps from the failed
 * skb allocation directly into a fwnet_finish_incoming_packet() call and
 * then to the 'fail' unlock path.  Code left byte-identical.
 */
staticint fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
		int source_node_id, int generation, bool is_broadcast)
{
	struct sk_buff *skb;
	struct net_device *net = dev->netdev;
	struct rfc2734_header hdr;
	unsigned lf;
	unsignedlong flags;
	struct fwnet_peer *peer;
	struct fwnet_partial_datagram *pd;
	int fg_off;
	int dg_size;
	u16 datagram_label;
	int retval;
	u16 ether_type;

	/* too short to carry payload beyond the unfragmented header */
	if (len <= RFC2374_UNFRAG_HDR_SIZE)
		return 0;

	hdr.w0 = be32_to_cpu(buf[0]);
	lf = fwnet_get_hdr_lf(&hdr);
	if (lf == RFC2374_HDR_UNFRAG) {
		/*
		 * An unfragmented datagram has been received by the ieee1394
		 * bus. Build an skbuff around it so we can pass it to the
		 * high level network layer.
		 */
		ether_type = fwnet_get_hdr_ether_type(&hdr);
		buf++;
		len -= RFC2374_UNFRAG_HDR_SIZE;

		skb = dev_alloc_skb(len + LL_RESERVED_SPACE(net));
		if (unlikely(!skb)) {
			net->stats.rx_dropped++;
			/* NOTE(review): truncation — skb is NULL in this
			 * branch, so this call must belong to the success
			 * path; intermediate code appears to be missing. */
			return fwnet_finish_incoming_packet(net, skb,
					source_node_id, false, ether_type);
		}
		/*
		 * Datagram is not complete, we're done for the
		 * moment.
		 */
		retval = 0;
 fail:
	spin_unlock_irqrestore(&dev->lock, flags);

	return retval;
}
/*
 * Address-space write handler: receives datagrams unicast to our FIFO
 * address, and broadcasts addressed to IEEE1394_ALL_NODES.
 *
 * NOTE(review): "staticvoid", "unsignedlonglong" and "elseif" are
 * extraction artifacts for the space-separated forms.  The function is
 * also truncated here: per the comment inside, a fw_send_response(card,
 * r, rcode) call (and the closing brace) should follow the rcode
 * selection, but is missing from this span.
 */
staticvoid fwnet_receive_packet(struct fw_card *card, struct fw_request *r,
		int tcode, int destination, int source, int generation,
		unsignedlonglong offset, void *payload, size_t length,
		void *callback_data)
{
	struct fwnet_device *dev = callback_data;
	int rcode;

	if (destination == IEEE1394_ALL_NODES) {
		// Although the response to the broadcast packet is not necessarily required, the
		// fw_send_response() function should still be called to maintain the reference
		// counting of the object. In the case, the call of function just releases the
		// object as a result to decrease the reference counting.
		rcode = RCODE_COMPLETE;
	} elseif (offset != dev->handler.offset) {
		rcode = RCODE_ADDRESS_ERROR;
	} elseif (tcode != TCODE_WRITE_BLOCK_REQUEST) {
		rcode = RCODE_TYPE_ERROR;
	} elseif (fwnet_incoming_packet(dev, payload, length,
			source, generation, false) != 0) {
		dev_err(&dev->netdev->dev, "incoming packet failure\n");
		rcode = RCODE_CONFLICT_ERROR;
	} else {
		rcode = RCODE_COMPLETE;
	}
/*
 * NOTE(review): corrupted/fused region.  What follows is interleaved
 * fragments of at least two functions: a transmit-completion handler
 * (updates tx statistics, advances ptask to the next fragment) and a
 * packet-send routine (pushes the RFC 2374 header and issues
 * fw_send_request() for the GASP broadcast path).  Variables used here
 * (ptask, free, skb, lf, bufhdr, tx_len, flags, dg_size, fg_off,
 * datagram_label) have no visible declarations, the switch statements
 * are never closed properly, and the enqueued/free bookkeeping block is
 * duplicated — all extraction damage.  "unsignedint" stands for
 * "unsigned int".  Code left byte-identical; restore from upstream.
 */
	/* Check whether we or the networking TX soft-IRQ is last user. */
	free = (ptask->outstanding_pkts == 0 && ptask->enqueued);
	if (free)
		dec_queued_datagrams(dev);

	if (ptask->outstanding_pkts == 0) {
		dev->netdev->stats.tx_packets++;
		dev->netdev->stats.tx_bytes += skb->len;
	}

	/* Update the ptask to point to the next fragment and send it */
	lf = fwnet_get_hdr_lf(&ptask->hdr);
	switch (lf) {
	case RFC2374_HDR_LASTFRAG:
	case RFC2374_HDR_UNFRAG:
	default:
		dev_err(&dev->netdev->dev,
			"outstanding packet %x lf %x, header %x,%x\n",
			ptask->outstanding_pkts, lf, ptask->hdr.w0,
			ptask->hdr.w1);
		BUG();

	case RFC2374_HDR_FIRSTFRAG:
		/* Set frag type here for future interior fragments */
		dg_size = fwnet_get_hdr_dg_size(&ptask->hdr);
		fg_off = ptask->max_payload - RFC2374_FRAG_HDR_SIZE;
		datagram_label = fwnet_get_hdr_dgl(&ptask->hdr);
		break;

	/* NOTE(review): splice point — send-routine fragment begins here. */
	dev = ptask->dev;
	tx_len = ptask->max_payload;
	switch (fwnet_get_hdr_lf(&ptask->hdr)) {
	case RFC2374_HDR_UNFRAG:
		bufhdr = skb_push(ptask->skb, RFC2374_UNFRAG_HDR_SIZE);
		put_unaligned_be32(ptask->hdr.w0, &bufhdr->w0);
		break;

	case RFC2374_HDR_FIRSTFRAG:
	case RFC2374_HDR_INTFRAG:
	case RFC2374_HDR_LASTFRAG:
		bufhdr = skb_push(ptask->skb, RFC2374_FRAG_HDR_SIZE);
		put_unaligned_be32(ptask->hdr.w0, &bufhdr->w0);
		put_unaligned_be32(ptask->hdr.w1, &bufhdr->w1);
		break;

	default:
		BUG();
	}

	if (ptask->dest_node == IEEE1394_ALL_NODES) {
		u8 *p;
		int generation;
		int node_id;
		unsignedint sw_version;

		/* ptask->generation may not have been set yet */
		generation = dev->card->generation;
		smp_rmb();
		node_id = dev->card->node_id;

		/* We should not transmit if broadcast_channel.valid == 0. */
		fw_send_request(dev->card, &ptask->transaction,
				TCODE_STREAM_DATA,
				fw_stream_packet_destination_id(3,
					IEEE1394_BROADCAST_CHANNEL, 0),
				generation, SCODE_100, 0ULL, ptask->skb->data,
				tx_len + 8, fwnet_write_complete, ptask);

		spin_lock_irqsave(&dev->lock, flags);

		/* If the AT work item already ran, we may be last user. */
		free = (ptask->outstanding_pkts == 0 && !ptask->enqueued);
		if (!free)
			ptask->enqueued = true;
		else
			dec_queued_datagrams(dev);

		/* NOTE(review): duplicated block — extraction artifact. */
		/* If the AT work item already ran, we may be last user. */
		free = (ptask->outstanding_pkts == 0 && !ptask->enqueued);
		if (!free)
			ptask->enqueued = true;
		else
			dec_queued_datagrams(dev);

		spin_unlock_irqrestore(&dev->lock, flags);

		netif_trans_update(dev->netdev);
 out:
	if (free)
		fwnet_free_ptask(ptask);

	return 0;
}
/*
 * Tear down the receive FIFO address handler.
 *
 * NOTE(review): "staticvoid" is an extraction artifact for "static void",
 * and the function body is truncated — only the early-return guard for an
 * unregistered FIFO (FWNET_NO_FIFO_ADDR) survives in this span.
 */
staticvoid fwnet_fifo_stop(struct fwnet_device *dev)
{
	if (dev->local_fifo == FWNET_NO_FIFO_ADDR)
		return;
/*
 * NOTE(review): corrupted/fused region — fragments of the broadcast
 * setup path (computes broadcast_xmt_max_payload, marks the state
 * RUNNING) spliced into the middle of a transmit routine (queue-stop
 * check, ptask allocation, skb_share_check, protocol dispatch, GASP
 * multicast path, and the error/cleanup tail returning NETDEV_TX_OK).
 * Variables used here (flags, ptask, skb, hdr_buf, proto, dg_size,
 * max_payload, datagram_label_ptr, net) have no visible declarations,
 * and several braces never match.  Code left byte-identical; restore
 * from the upstream file.
 */
	/* FIXME: adjust it according to the min. speed of all known peers? */
	dev->broadcast_xmt_max_payload = IEEE1394_MAX_PAYLOAD_S100
			- IEEE1394_GASP_HDR_SIZE - RFC2374_UNFRAG_HDR_SIZE;
	dev->broadcast_state = FWNET_BROADCAST_RUNNING;

	/* Can this happen? */
	if (netif_queue_stopped(dev->netdev)) {
		spin_unlock_irqrestore(&dev->lock, flags);

		return NETDEV_TX_BUSY;
	}

	ptask = kmem_cache_alloc(fwnet_packet_task_cache, GFP_ATOMIC);
	if (ptask == NULL)
		goto fail;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		goto fail;

	/*
	 * Make a copy of the driver-specific header.
	 * We might need to rebuild the header on tx failure.
	 */
	memcpy(&hdr_buf, skb->data, sizeof(hdr_buf));

	proto = hdr_buf.h_proto;

	switch (proto) {
	case htons(ETH_P_ARP):
	case htons(ETH_P_IP):
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
#endif
		break;

	default:
		goto fail;
	}

	/*
	 * Set the transmission type for the packet.  ARP packets and IP
	 * broadcast packets are sent via GASP.
	 */
	if (fwnet_hwaddr_is_multicast(hdr_buf.h_dest)) {
		max_payload = dev->broadcast_xmt_max_payload;
		datagram_label_ptr = &dev->broadcast_xmt_datagramlabel;

	/* Does it all fit in one packet? */
	if (dg_size <= max_payload) {
		fwnet_make_uf_hdr(&ptask->hdr, ntohs(proto));
		ptask->outstanding_pkts = 1;
		max_payload = dg_size + RFC2374_UNFRAG_HDR_SIZE;
	} else {
		u16 datagram_label;

	/* NOTE(review): splice point — error/cleanup tail begins here. */
	if (ptask)
		kmem_cache_free(fwnet_packet_task_cache, ptask);

	if (skb != NULL)
		dev_kfree_skb(skb);

	net->stats.tx_dropped++;
	net->stats.tx_errors++;

	/*
	 * FIXME: According to a patch from 2003-02-26, "returning non-zero
	 * causes serious problems" here, allegedly.  Before that patch,
	 * -ERRNO was returned which is not appropriate under Linux 2.6.
	 * Perhaps more needs to be done?  Stop the queue in serious
	 * conditions and restart it elsewhere?
	 */
	return NETDEV_TX_OK;
}
/* Module metadata.  The author's e-mail address was stripped by text
 * extraction; restored from the copyright notice in the file header. */
MODULE_AUTHOR("Jay Fenlason <fenlason@redhat.com>");
MODULE_DESCRIPTION("IP over IEEE1394 as per RFC 2734/3146");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(ieee1394, fwnet_id_table);
Messung V0.5
¤ Dauer der Verarbeitung: 0.16 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.