/*
 * Copyright (c) 2016 Citrix Systems Inc.
 * Copyright (c) 2002-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
*/ #include"common.h"
#include <linux/kthread.h>
#include <xen/xen.h> #include <xen/events.h>
/* Update the needed ring page slots for the first SKB queued.
 * Note that any call sequence outside the RX thread calling this function
 * needs to wake up the RX thread via a call of xenvif_kick_thread()
 * afterwards in order to avoid a race with putting the thread to sleep.
 *
 * NOTE(review): this region is garbled by extraction -- "staticvoid",
 * "conststruct" and "unsignedint" are fused tokens, statements share
 * lines, and the body is truncated: 'needed' is never stored and there
 * is no closing brace before the unrelated loop that follows. Re-sync
 * with the pristine source; code bytes are left untouched below.
 */
staticvoid xenvif_update_needed_slots(struct xenvif_queue *queue, conststruct sk_buff *skb)
{ unsignedint needed = 0;
if (skb) {
/* One slot per XEN_PAGE_SIZE chunk of skb->len. */
needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE); if (skb_is_gso(skb))
/* Extra slot for GSO -- presumably a GSO extra_info segment; confirm. */
needed++; if (skb->sw_hash)
/* Extra slot when a software hash is set -- TODO confirm semantics. */
needed++;
}
/* NOTE(review): truncated fragment -- 'i' and 'notify' are not declared
 * in the visible text, the failure branch body is missing, and there is
 * no enclosing function header. This looks like the tail of a grant-copy
 * flush helper. Code bytes are left untouched below.
 */
for (i = 0; i < queue->rx_copy.num; i++) { struct gnttab_copy *op;
op = &queue->rx_copy.op[i];
/* If the copy failed, overwrite the status field in
 * the corresponding response.
 */
if (unlikely(op->status != GNTST_okay)) { struct xen_netif_rx_response *rsp;
/* Push responses for all completed packets. */
RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, notify); if (notify)
notify_remote_via_irq(queue->rx_irq);
/* NOTE(review): garbled fragment -- "staticvoid" is a fused token, and
 * the slot-dispatch code below references undeclared 'req'/'rsp' and
 * uses 'pkt.' member access although 'pkt' is a pointer in this
 * signature; lines from two different functions appear to have been
 * merged here. Code bytes are left untouched below.
 */
staticvoid xenvif_rx_complete(struct xenvif_queue *queue, struct xenvif_pkt_state *pkt)
{ /* All responses are ready to be pushed. */
queue->rx.rsp_prod_pvt = queue->rx.req_cons;
/* Extras must go after the first data slot */
if (pkt.slot != 0 && pkt.extra_count != 0)
xenvif_rx_extra_slot(queue, &pkt, req, rsp); else
xenvif_rx_data_slot(queue, &pkt, req, rsp);
/* Wait until the guest Rx thread has work.
 *
 * The timeout needs to be adjusted based on the current head of the
 * queue (and not just the head at the beginning). In particular, if
 * the queue is initially empty an infinite timeout is used and this
 * needs to be reduced when a skb is queued.
 *
 * This cannot be done with wait_event_timeout() because it only
 * calculates the timeout once.
 */
static void xenvif_wait_for_rx_work(struct xenvif_queue *queue)
{
	DEFINE_WAIT(wait);

	/* Fast path: work is already available, no need to sleep. */
	if (xenvif_have_rx_work(queue, true))
		return;

	for (;;) {
		long ret;

		prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE);
		if (xenvif_have_rx_work(queue, true))
			break;
		/* Clear any pending EOI flags and, if one was set, issue the
		 * deferred end-of-interrupt for the Rx IRQ before sleeping.
		 */
		if (atomic_fetch_andnot(NETBK_RX_EOI | NETBK_COMMON_EOI,
					&queue->eoi_pending) &
		    (NETBK_RX_EOI | NETBK_COMMON_EOI))
			xen_irq_lateeoi(queue->rx_irq, 0);
		/* Recompute the timeout each iteration (see comment above). */
		ret = schedule_timeout(xenvif_rx_queue_timeout(queue));
		if (!ret)
			break;
	}
	finish_wait(&queue->wq, &wait);
}
/* At least one queue has stalled? Disable the carrier.
 * NOTE(review): fragment -- 'vif' is not declared in the visible text;
 * this is the tail of a carrier-off helper whose header is missing.
 * Code bytes are left untouched below.
 */
spin_lock(&vif->lock); if (vif->stalled_queues++ == 0) {
netdev_info(vif->dev, "Guest Rx stalled");
netif_carrier_off(vif->dev);
}
spin_unlock(&vif->lock);
}
/* NOTE(review): fragment -- the enclosing kthread function's header and
 * return are missing from the visible text, 'vif' is undeclared here,
 * and "elseif" below is a fused token. Code bytes are left untouched.
 */
if (!vif->stall_timeout)
xenvif_queue_carrier_on(queue);
for (;;) {
/* Block until there is Rx work (or a signal/stop request). */
xenvif_wait_for_rx_work(queue);
if (kthread_should_stop()) break;
/* This frontend is found to be rogue, disable it in
 * kthread context. Currently this is only set when
 * netback finds out frontend sends malformed packet,
 * but we cannot disable the interface in softirq
 * context so we defer it here, if this thread is
 * associated with queue 0.
 */
if (unlikely(vif->disabled && queue->id == 0)) {
xenvif_carrier_off(vif); break;
}
if (!skb_queue_empty(&queue->rx_queue))
xenvif_rx_action(queue);
/* If the guest hasn't provided any Rx slots for a
 * while it's probably not responsive, drop the
 * carrier so packets are dropped earlier.
 */
if (vif->stall_timeout) { if (xenvif_rx_queue_stalled(queue))
xenvif_queue_carrier_off(queue); elseif (xenvif_rx_queue_ready(queue))
xenvif_queue_carrier_on(queue);
}
/* Queued packets may have foreign pages from other
 * domains. These cannot be queued indefinitely as
 * this would starve guests of grant refs and transmit
 * slots.
 */
xenvif_rx_queue_drop_expired(queue);
cond_resched();
}
/* Bin any remaining skbs */
xenvif_rx_queue_purge(queue);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.